aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb2
-rw-r--r--Documentation/filesystems/ceph.txt139
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kobject.txt60
-rw-r--r--Documentation/networking/stmmac.txt143
-rw-r--r--MAINTAINERS26
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/asm/core_marvel.h1
-rw-r--r--arch/alpha/include/asm/core_mcpcia.h1
-rw-r--r--arch/alpha/include/asm/core_titan.h1
-rw-r--r--arch/alpha/include/asm/core_tsunami.h1
-rw-r--r--arch/alpha/kernel/sys_dp264.c2
-rw-r--r--arch/alpha/kernel/sys_titan.c2
-rw-r--r--arch/alpha/kernel/traps.c10
-rw-r--r--arch/arm/Kconfig81
-rw-r--r--arch/arm/boot/compressed/decompress.c1
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/boot/compressed/misc.c1
-rw-r--r--arch/arm/configs/cm_t35_defconfig2
-rw-r--r--arch/arm/configs/n770_defconfig1
-rw-r--r--arch/arm/configs/n8x0_defconfig160
-rw-r--r--arch/arm/configs/omap3_beagle_defconfig1
-rw-r--r--arch/arm/configs/omap3_defconfig2
-rw-r--r--arch/arm/configs/omap3_evm_defconfig1
-rw-r--r--arch/arm/configs/omap3_touchbook_defconfig2
-rw-r--r--arch/arm/configs/omap_3430sdp_defconfig1
-rw-r--r--arch/arm/configs/omap_3630sdp_defconfig2
-rw-r--r--arch/arm/configs/omap_h2_1610_defconfig1
-rw-r--r--arch/arm/configs/omap_zoom2_defconfig1
-rw-r--r--arch/arm/configs/omap_zoom3_defconfig2
-rw-r--r--arch/arm/configs/rx51_defconfig1
-rw-r--r--arch/arm/include/asm/elf.h1
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h1
-rw-r--r--arch/arm/kernel/entry-header.S2
-rw-r--r--arch/arm/kernel/kgdb.c13
-rw-r--r--arch/arm/kernel/perf_event.c5
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c10
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c26
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c2
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c56
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c98
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c12
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c12
-rw-r--r--arch/arm/mach-omap2/board-overo.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom3.c2
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c1
-rw-r--r--arch/arm/mach-omap2/io.c21
-rw-r--r--arch/arm/mach-omap2/mailbox.c12
-rw-r--r--arch/arm/mach-omap2/omap44xx-smc.S32
-rw-r--r--arch/arm/mach-omap2/prcm.c4
-rw-r--r--arch/arm/mach-omap2/serial.c15
-rw-r--r--arch/arm/mach-omap2/usb-ehci.c6
-rw-r--r--arch/arm/mach-rpc/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-s3c2440/s3c2440-cpufreq.c (renamed from arch/arm/plat-s3c24xx/s3c2440-cpufreq.c)0
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-s5p6440/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-s5p6442/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c32
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c122
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c57
-rw-r--r--arch/arm/mach-shmobile/clock-sh7367.c7
-rw-r--r--arch/arm/mach-shmobile/intc-sh7367.c46
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c46
-rw-r--r--arch/arm/mach-shmobile/intc-sh7377.c36
-rw-r--r--arch/arm/plat-omap/gpio.c10
-rw-r--r--arch/arm/plat-omap/include/plat/blizzard.h2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h8
-rw-r--r--arch/arm/plat-omap/include/plat/prcm.h2
-rw-r--r--arch/arm/plat-omap/include/plat/system.h6
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h2
-rw-r--r--arch/arm/plat-omap/mcbsp.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/uncompress.h2
-rw-r--r--arch/arm/plat-samsung/pwm.c35
-rw-r--r--arch/blackfin/Kconfig81
-rw-r--r--arch/blackfin/Kconfig.debug12
-rw-r--r--arch/blackfin/Makefile7
-rw-r--r--arch/blackfin/boot/Makefile23
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig270
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig336
-rw-r--r--arch/blackfin/configs/BF527-EZKIT-V2_defconfig1811
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig344
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig250
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig295
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig332
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig291
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig108
-rw-r--r--arch/blackfin/configs/BF561-ACVILON_defconfig15
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig82
-rw-r--r--arch/blackfin/configs/H8606_defconfig7
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig295
-rw-r--r--arch/blackfin/configs/TCM-BF518_defconfig1375
-rw-r--r--arch/blackfin/include/asm/bfin-lq035q1.h12
-rw-r--r--arch/blackfin/include/asm/bfin_can.h725
-rw-r--r--arch/blackfin/include/asm/bfin_sport.h227
-rw-r--r--arch/blackfin/include/asm/bfin_watchdog.h30
-rw-r--r--arch/blackfin/include/asm/bitops.h74
-rw-r--r--arch/blackfin/include/asm/context.S22
-rw-r--r--arch/blackfin/include/asm/cpu.h2
-rw-r--r--arch/blackfin/include/asm/def_LPBlackfin.h58
-rw-r--r--arch/blackfin/include/asm/delay.h16
-rw-r--r--arch/blackfin/include/asm/dma-mapping.h15
-rw-r--r--arch/blackfin/include/asm/dma.h4
-rw-r--r--arch/blackfin/include/asm/dpmc.h4
-rw-r--r--arch/blackfin/include/asm/elf.h8
-rw-r--r--arch/blackfin/include/asm/ftrace.h53
-rw-r--r--arch/blackfin/include/asm/gpio.h17
-rw-r--r--arch/blackfin/include/asm/irq.h7
-rw-r--r--arch/blackfin/include/asm/mmu_context.h14
-rw-r--r--arch/blackfin/include/asm/nmi.h12
-rw-r--r--arch/blackfin/include/asm/page.h3
-rw-r--r--arch/blackfin/include/asm/ptrace.h25
-rw-r--r--arch/blackfin/include/asm/sections.h3
-rw-r--r--arch/blackfin/include/asm/smp.h15
-rw-r--r--arch/blackfin/include/asm/syscall.h96
-rw-r--r--arch/blackfin/include/asm/thread_info.h25
-rw-r--r--arch/blackfin/include/asm/time.h13
-rw-r--r--arch/blackfin/kernel/Makefile1
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c10
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c20
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbinit.c9
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbmgr.c54
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c9
-rw-r--r--arch/blackfin/kernel/dma-mapping.c2
-rw-r--r--arch/blackfin/kernel/entry.S8
-rw-r--r--arch/blackfin/kernel/ftrace-entry.S23
-rw-r--r--arch/blackfin/kernel/ftrace.c6
-rw-r--r--arch/blackfin/kernel/init_task.c2
-rw-r--r--arch/blackfin/kernel/kgdb.c10
-rw-r--r--arch/blackfin/kernel/nmi.c299
-rw-r--r--arch/blackfin/kernel/process.c7
-rw-r--r--arch/blackfin/kernel/ptrace.c360
-rw-r--r--arch/blackfin/kernel/setup.c34
-rw-r--r--arch/blackfin/kernel/signal.c24
-rw-r--r--arch/blackfin/kernel/time-ts.c205
-rw-r--r--arch/blackfin/kernel/traps.c32
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S70
-rw-r--r--arch/blackfin/mach-bf518/boards/Kconfig5
-rw-r--r--arch/blackfin/mach-bf518/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c178
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c753
-rw-r--r--arch/blackfin/mach-bf518/include/mach/irq.h12
-rw-r--r--arch/blackfin/mach-bf518/include/mach/mem_map.h2
-rw-r--r--arch/blackfin/mach-bf527/boards/Kconfig5
-rw-r--r--arch/blackfin/mach-bf527/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c245
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c199
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c431
-rw-r--r--arch/blackfin/mach-bf527/include/mach/irq.h12
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c66
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c134
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c138
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c102
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c63
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c174
-rw-r--r--arch/blackfin/mach-bf533/include/mach/irq.h3
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c116
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c187
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c183
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c169
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c684
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c187
-rw-r--r--arch/blackfin/mach-bf537/include/mach/irq.h12
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c346
-rw-r--r--arch/blackfin/mach-bf538/include/mach/irq.h3
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c399
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c443
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h3
-rw-r--r--arch/blackfin/mach-bf561/Makefile1
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c66
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c70
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c68
-rw-r--r--arch/blackfin/mach-bf561/hotplug.c32
-rw-r--r--arch/blackfin/mach-bf561/include/mach/irq.h3
-rw-r--r--arch/blackfin/mach-bf561/include/mach/smp.h2
-rw-r--r--arch/blackfin/mach-bf561/secondary.S50
-rw-r--r--arch/blackfin/mach-bf561/smp.c44
-rw-r--r--arch/blackfin/mach-common/cpufreq.c168
-rw-r--r--arch/blackfin/mach-common/entry.S81
-rw-r--r--arch/blackfin/mach-common/head.S16
-rw-r--r--arch/blackfin/mach-common/interrupt.S35
-rw-r--r--arch/blackfin/mach-common/ints-priority.c208
-rw-r--r--arch/blackfin/mach-common/smp.c87
-rw-r--r--arch/blackfin/mm/isram-driver.c6
-rw-r--r--arch/blackfin/mm/sram-alloc.c27
-rw-r--r--arch/microblaze/Kconfig64
-rw-r--r--arch/microblaze/Makefile1
-rw-r--r--arch/microblaze/include/asm/device.h4
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h154
-rw-r--r--arch/microblaze/include/asm/io.h31
-rw-r--r--arch/microblaze/include/asm/irq.h37
-rw-r--r--arch/microblaze/include/asm/page.h12
-rw-r--r--arch/microblaze/include/asm/pci-bridge.h195
-rw-r--r--arch/microblaze/include/asm/pci.h178
-rw-r--r--arch/microblaze/include/asm/pgalloc.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h40
-rw-r--r--arch/microblaze/include/asm/prom.h15
-rw-r--r--arch/microblaze/include/asm/system.h3
-rw-r--r--arch/microblaze/include/asm/tlbflush.h2
-rw-r--r--arch/microblaze/kernel/Makefile2
-rw-r--r--arch/microblaze/kernel/asm-offsets.c1
-rw-r--r--arch/microblaze/kernel/cpu/cache.c211
-rw-r--r--arch/microblaze/kernel/dma.c156
-rw-r--r--arch/microblaze/kernel/entry.S116
-rw-r--r--arch/microblaze/kernel/head.S13
-rw-r--r--arch/microblaze/kernel/irq.c15
-rw-r--r--arch/microblaze/kernel/setup.c45
-rw-r--r--arch/microblaze/mm/Makefile2
-rw-r--r--arch/microblaze/mm/consistent.c246
-rw-r--r--arch/microblaze/mm/init.c39
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/microblaze/pci/Makefile6
-rw-r--r--arch/microblaze/pci/indirect_pci.c163
-rw-r--r--arch/microblaze/pci/iomap.c39
-rw-r--r--arch/microblaze/pci/pci-common.c1642
-rw-r--r--arch/microblaze/pci/pci_32.c430
-rw-r--r--arch/microblaze/pci/xilinx_pci.c168
-rw-r--r--arch/powerpc/Kconfig13
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig24
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig28
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig25
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig27
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig28
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig134
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h6
-rw-r--r--arch/powerpc/include/asm/syscall.h6
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S7
-rw-r--r--arch/powerpc/kernel/iommu.c7
-rw-r--r--arch/powerpc/kernel/perf_event.c21
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/setup_64.c6
-rw-r--r--arch/powerpc/mm/mem.c6
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c6
-rw-r--r--arch/sh/include/asm/mmu.h7
-rw-r--r--arch/sh/kernel/perf_event.c20
-rw-r--r--arch/sh/mm/uncached.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event.c183
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c70
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c57
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c10
-rw-r--r--arch/x86/kernel/dumpstack.h15
-rw-r--r--arch/x86/kernel/dumpstack_64.c4
-rw-r--r--drivers/acpi/video.c9
-rw-r--r--drivers/ata/ahci.c66
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/cpu.c16
-rw-r--r--drivers/base/firmware_class.c2
-rw-r--r--drivers/base/memory.c15
-rw-r--r--drivers/base/node.c7
-rw-r--r--drivers/base/platform.c33
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/hvc_console.c31
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c10
-rw-r--r--drivers/char/tty_buffer.c4
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/virtio_console.c15
-rw-r--r--drivers/char/vt_ioctl.c39
-rw-r--r--drivers/edac/edac_mce_amd.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c12
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-debug.c6
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-ntrig.c11
-rw-r--r--drivers/hid/hid-tmff.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c12
-rw-r--r--drivers/isdn/hisax/elsa_cs.c12
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c12
-rw-r--r--drivers/isdn/hisax/teles_cs.c12
-rw-r--r--drivers/leds/Kconfig80
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/dell-led.c200
-rw-r--r--drivers/leds/led-class.c42
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/macintosh/via-pmu-backlight.c7
-rw-r--r--drivers/md/linear.c12
-rw-r--r--drivers/md/multipath.c20
-rw-r--r--drivers/md/raid0.c13
-rw-r--r--drivers/md/raid1.c28
-rw-r--r--drivers/md/raid10.c28
-rw-r--r--drivers/mtd/maps/omap_nor.c0
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/net/Kconfig25
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/benet/be_cmds.c4
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/benet/be_main.c21
-rw-r--r--drivers/net/bnx2.c14
-rw-r--r--drivers/net/bonding/bond_main.c66
-rw-r--r--drivers/net/can/bfin_can.c97
-rw-r--r--drivers/net/cxgb4/Makefile7
-rw-r--r--drivers/net/cxgb4/cxgb4.h741
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c3388
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h239
-rw-r--r--drivers/net/cxgb4/l2t.c624
-rw-r--r--drivers/net/cxgb4/l2t.h110
-rw-r--r--drivers/net/cxgb4/sge.c2431
-rw-r--r--drivers/net/cxgb4/t4_hw.c3131
-rw-r--r--drivers/net/cxgb4/t4_hw.h100
-rw-r--r--drivers/net/cxgb4/t4_msg.h664
-rw-r--r--drivers/net/cxgb4/t4_regs.h878
-rw-r--r--drivers/net/cxgb4/t4fw_api.h1580
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_main.c9
-rw-r--r--drivers/net/e1000e/e1000.h1
-rw-r--r--drivers/net/e1000e/netdev.c11
-rw-r--r--drivers/net/gianfar.c17
-rw-r--r--drivers/net/gianfar.h6
-rw-r--r--drivers/net/igb/e1000_mac.c6
-rw-r--r--drivers/net/igb/igb.h1
-rw-r--r--drivers/net/igb/igb_main.c22
-rw-r--r--drivers/net/igbvf/igbvf.h1
-rw-r--r--drivers/net/igbvf/netdev.c11
-rw-r--r--drivers/net/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c21
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c33
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c43
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ksz884x.c2
-rw-r--r--drivers/net/mlx4/main.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c14
-rw-r--r--drivers/net/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c49
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c3
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c12
-rw-r--r--drivers/net/r8169.c54
-rw-r--r--drivers/net/sgiseeq.c4
-rw-r--r--drivers/net/stmmac/Kconfig1
-rw-r--r--drivers/net/tulip/uli526x.c8
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c4
-rw-r--r--drivers/net/wireless/libertas/cfg.c8
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/mwl8k.c1
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c4
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pcmcia/i82092.c1
-rw-r--r--drivers/pcmcia/i82365.h1
-rw-r--r--drivers/pcmcia/pcmcia_resource.c36
-rw-r--r--drivers/pcmcia/pd6729.c1
-rw-r--r--drivers/pcmcia/ti113x.h37
-rw-r--r--drivers/pcmcia/vrc4171_card.c5
-rw-r--r--drivers/pcmcia/yenta_socket.c46
-rw-r--r--drivers/platform/x86/acer-wmi.c7
-rw-r--r--drivers/platform/x86/asus-laptop.c7
-rw-r--r--drivers/platform/x86/asus_acpi.c7
-rw-r--r--drivers/platform/x86/classmate-laptop.c12
-rw-r--r--drivers/platform/x86/compal-laptop.c11
-rw-r--r--drivers/platform/x86/dell-laptop.c13
-rw-r--r--drivers/platform/x86/eeepc-laptop.c8
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c14
-rw-r--r--drivers/platform/x86/msi-laptop.c7
-rw-r--r--drivers/platform/x86/msi-wmi.c15
-rw-r--r--drivers/platform/x86/panasonic-laptop.c28
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c12
-rw-r--r--drivers/platform/x86/toshiba_acpi.c10
-rw-r--r--drivers/s390/char/sclp_cmd.c7
-rw-r--r--drivers/scsi/Kconfig6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c7
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c201
-rw-r--r--drivers/scsi/be2iscsi/be_main.h11
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c14
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h8
-rw-r--r--drivers/scsi/bfa/Makefile8
-rw-r--r--drivers/scsi/bfa/bfa_core.c19
-rw-r--r--drivers/scsi/bfa/bfa_fcport.c1709
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c63
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c75
-rw-r--r--drivers/scsi/bfa/bfa_fcs_port.c11
-rw-r--r--drivers/scsi/bfa/bfa_fcs_uf.c8
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c13
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c9
-rw-r--r--drivers/scsi/bfa/bfa_intr.c111
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c762
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h57
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c274
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c423
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.c24
-rw-r--r--drivers/scsi/bfa/bfa_iocfc.h3
-rw-r--r--drivers/scsi/bfa/bfa_ioim.c22
-rw-r--r--drivers/scsi/bfa/bfa_itnim.c30
-rw-r--r--drivers/scsi/bfa/bfa_lps.c134
-rw-r--r--drivers/scsi/bfa/bfa_module.c4
-rw-r--r--drivers/scsi/bfa/bfa_modules_priv.h2
-rw-r--r--drivers/scsi/bfa/bfa_port_priv.h57
-rw-r--r--drivers/scsi/bfa/bfa_priv.h2
-rw-r--r--drivers/scsi/bfa/bfa_rport.c26
-rw-r--r--drivers/scsi/bfa/bfa_trcmod_priv.h62
-rw-r--r--drivers/scsi/bfa/bfa_tskim.c14
-rw-r--r--drivers/scsi/bfa/bfad.c208
-rw-r--r--drivers/scsi/bfa/bfad_attr.c76
-rw-r--r--drivers/scsi/bfa/bfad_attr.h9
-rw-r--r--drivers/scsi/bfa/bfad_drv.h35
-rw-r--r--drivers/scsi/bfa/bfad_im.c53
-rw-r--r--drivers/scsi/bfa/bfad_im.h5
-rw-r--r--drivers/scsi/bfa/bfad_intr.c11
-rw-r--r--drivers/scsi/bfa/fabric.c59
-rw-r--r--drivers/scsi/bfa/fcbuild.h6
-rw-r--r--drivers/scsi/bfa/fcpim.c51
-rw-r--r--drivers/scsi/bfa/fcs_fabric.h2
-rw-r--r--drivers/scsi/bfa/fcs_fcpim.h5
-rw-r--r--drivers/scsi/bfa/fcs_lport.h7
-rw-r--r--drivers/scsi/bfa/fcs_port.h3
-rw-r--r--drivers/scsi/bfa/fcs_rport.h3
-rw-r--r--drivers/scsi/bfa/fcs_uf.h3
-rw-r--r--drivers/scsi/bfa/fcs_vport.h8
-rw-r--r--drivers/scsi/bfa/fdmi.c79
-rw-r--r--drivers/scsi/bfa/include/aen/bfa_aen.h50
-rw-r--r--drivers/scsi/bfa/include/bfa.h22
-rw-r--r--drivers/scsi/bfa/include/bfa_svc.h101
-rw-r--r--drivers/scsi/bfa/include/bfa_timer.h2
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi.h4
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_cbreg.h16
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ctreg.h26
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_ioc.h2
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_lps.h8
-rw-r--r--drivers/scsi/bfa/include/bfi/bfi_pport.h172
-rw-r--r--drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h4
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_log.h2
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_plog.h9
-rw-r--r--drivers/scsi/bfa/include/cs/bfa_sm.h8
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_aen.h10
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_auth.h22
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h14
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_driver.h3
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ethport.h1
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_fcport.h94
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_im_common.h32
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_im_team.h72
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_ioc.h3
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h12
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_lport.h4
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_mfg.h111
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_port.h19
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_pport.h151
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h17
-rw-r--r--drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h1
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs.h5
-rw-r--r--drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h8
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_hal.h6
-rw-r--r--drivers/scsi/bfa/include/log/bfa_log_linux.h16
-rw-r--r--drivers/scsi/bfa/include/protocol/fc.h5
-rw-r--r--drivers/scsi/bfa/include/protocol/pcifw.h75
-rw-r--r--drivers/scsi/bfa/loop.c2
-rw-r--r--drivers/scsi/bfa/lport_api.c5
-rw-r--r--drivers/scsi/bfa/ms.c29
-rw-r--r--drivers/scsi/bfa/ns.c36
-rw-r--r--drivers/scsi/bfa/rport.c91
-rw-r--r--drivers/scsi/bfa/rport_api.c2
-rw-r--r--drivers/scsi/bfa/rport_ftrs.c12
-rw-r--r--drivers/scsi/bfa/scn.c10
-rw-r--r--drivers/scsi/bfa/vport.c86
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_iscsi.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c6
-rw-r--r--drivers/scsi/hpsa.c330
-rw-r--r--drivers/scsi/hpsa.h7
-rw-r--r--drivers/scsi/hpsa_cmd.h20
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c27
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c19
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h1
-rw-r--r--drivers/scsi/ibmvscsi/iseries_vscsi.c6
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c13
-rw-r--r--drivers/scsi/ipr.c1756
-rw-r--r--drivers/scsi/ipr.h467
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c23
-rw-r--r--drivers/scsi/lpfc/lpfc.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c332
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c142
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c527
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c277
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c413
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h38
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c7
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c1
-rw-r--r--drivers/scsi/raid_class.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c24
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_cpm2.c4
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/serial/sh-sci.c1
-rw-r--r--drivers/serial/sunsab.c2
-rw-r--r--drivers/serial/uartlite.c10
-rw-r--r--drivers/sh/intc.c31
-rw-r--r--drivers/spi/omap2_mcspi.c18
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c7
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c134
-rw-r--r--drivers/usb/core/devio.c17
-rw-r--r--drivers/usb/core/urb.c1
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-sched.c28
-rw-r--r--drivers/usb/host/ehci.h5
-rw-r--r--drivers/usb/host/r8a66597-hcd.c16
-rw-r--r--drivers/usb/host/xhci-mem.c9
-rw-r--r--drivers/usb/host/xhci.c (renamed from drivers/usb/host/xhci-hcd.c)1
-rw-r--r--drivers/usb/misc/appledisplay.c7
-rw-r--r--drivers/usb/musb/musb_core.c13
-rw-r--r--drivers/usb/musb/musb_core.h4
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_regs.h28
-rw-r--r--drivers/usb/serial/Kconfig4
-rw-r--r--drivers/usb/serial/console.c1
-rw-r--r--drivers/usb/serial/cp210x.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c7
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/generic.c49
-rw-r--r--drivers/usb/serial/option.c53
-rw-r--r--drivers/usb/serial/qcserial.c29
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/uwb/hwa-rc.c2
-rw-r--r--drivers/uwb/i1480/dfu/usb.c12
-rw-r--r--drivers/uwb/wlp/messages.c106
-rw-r--r--drivers/video/Kconfig4
-rw-r--r--drivers/video/amba-clcd.c31
-rw-r--r--drivers/video/atmel_lcdfb.c8
-rw-r--r--drivers/video/aty/aty128fb.c7
-rw-r--r--drivers/video/aty/atyfb_base.c7
-rw-r--r--drivers/video/aty/radeon_backlight.c7
-rw-r--r--drivers/video/backlight/88pm860x_bl.c6
-rw-r--r--drivers/video/backlight/Kconfig7
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c11
-rw-r--r--drivers/video/backlight/adx_bl.c10
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c8
-rw-r--r--drivers/video/backlight/backlight.c10
-rw-r--r--drivers/video/backlight/corgi_lcd.c8
-rw-r--r--drivers/video/backlight/cr_bllcd.c8
-rw-r--r--drivers/video/backlight/da903x_bl.c7
-rw-r--r--drivers/video/backlight/generic_bl.c8
-rw-r--r--drivers/video/backlight/hp680_bl.c8
-rw-r--r--drivers/video/backlight/jornada720_bl.c7
-rw-r--r--drivers/video/backlight/kb3886_bl.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c257
-rw-r--r--drivers/video/backlight/locomolcd.c8
-rw-r--r--drivers/video/backlight/max8925_bl.c6
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c55
-rw-r--r--drivers/video/backlight/omap1_bl.c7
-rw-r--r--drivers/video/backlight/progear_bl.c23
-rw-r--r--drivers/video/backlight/pwm_bl.c8
-rw-r--r--drivers/video/backlight/tosa_bl.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c7
-rw-r--r--drivers/video/bf54x-lq043fb.c19
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c19
-rw-r--r--drivers/video/nvidia/nv_backlight.c7
-rw-r--r--drivers/video/omap2/displays/panel-taal.c15
-rw-r--r--drivers/video/riva/fbdev.c7
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/afs/security.c5
-rw-r--r--fs/btrfs/btrfs_inode.h5
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/ctree.h13
-rw-r--r--fs/btrfs/disk-io.c15
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/extent-tree.c11
-rw-r--r--fs/btrfs/extent_io.c79
-rw-r--r--fs/btrfs/extent_io.h10
-rw-r--r--fs/btrfs/file.c23
-rw-r--r--fs/btrfs/inode.c139
-rw-r--r--fs/btrfs/ioctl.c706
-rw-r--r--fs/btrfs/ioctl.h111
-rw-r--r--fs/btrfs/ordered-data.c41
-rw-r--r--fs/btrfs/ordered-data.h7
-rw-r--r--fs/btrfs/relocation.c4
-rw-r--r--fs/btrfs/super.c238
-rw-r--r--fs/btrfs/transaction.c5
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c39
-rw-r--r--fs/ceph/Kconfig27
-rw-r--r--fs/ceph/Makefile39
-rw-r--r--fs/ceph/README20
-rw-r--r--fs/ceph/addr.c1188
-rw-r--r--fs/ceph/armor.c99
-rw-r--r--fs/ceph/auth.c257
-rw-r--r--fs/ceph/auth.h84
-rw-r--r--fs/ceph/auth_none.c121
-rw-r--r--fs/ceph/auth_none.h28
-rw-r--r--fs/ceph/auth_x.c656
-rw-r--r--fs/ceph/auth_x.h49
-rw-r--r--fs/ceph/auth_x_protocol.h90
-rw-r--r--fs/ceph/buffer.c78
-rw-r--r--fs/ceph/buffer.h39
-rw-r--r--fs/ceph/caps.c2927
-rw-r--r--fs/ceph/ceph_debug.h37
-rw-r--r--fs/ceph/ceph_frag.c21
-rw-r--r--fs/ceph/ceph_frag.h109
-rw-r--r--fs/ceph/ceph_fs.c74
-rw-r--r--fs/ceph/ceph_fs.h650
-rw-r--r--fs/ceph/ceph_hash.c118
-rw-r--r--fs/ceph/ceph_hash.h13
-rw-r--r--fs/ceph/ceph_strings.c176
-rw-r--r--fs/ceph/crush/crush.c151
-rw-r--r--fs/ceph/crush/crush.h180
-rw-r--r--fs/ceph/crush/hash.c149
-rw-r--r--fs/ceph/crush/hash.h17
-rw-r--r--fs/ceph/crush/mapper.c596
-rw-r--r--fs/ceph/crush/mapper.h20
-rw-r--r--fs/ceph/crypto.c408
-rw-r--r--fs/ceph/crypto.h48
-rw-r--r--fs/ceph/debugfs.c483
-rw-r--r--fs/ceph/decode.h194
-rw-r--r--fs/ceph/dir.c1220
-rw-r--r--fs/ceph/export.c223
-rw-r--r--fs/ceph/file.c937
-rw-r--r--fs/ceph/inode.c1750
-rw-r--r--fs/ceph/ioctl.c160
-rw-r--r--fs/ceph/ioctl.h40
-rw-r--r--fs/ceph/mds_client.c3021
-rw-r--r--fs/ceph/mds_client.h335
-rw-r--r--fs/ceph/mdsmap.c174
-rw-r--r--fs/ceph/mdsmap.h54
-rw-r--r--fs/ceph/messenger.c2240
-rw-r--r--fs/ceph/messenger.h254
-rw-r--r--fs/ceph/mon_client.c834
-rw-r--r--fs/ceph/mon_client.h119
-rw-r--r--fs/ceph/msgpool.c186
-rw-r--r--fs/ceph/msgpool.h27
-rw-r--r--fs/ceph/msgr.h158
-rw-r--r--fs/ceph/osd_client.c1537
-rw-r--r--fs/ceph/osd_client.h166
-rw-r--r--fs/ceph/osdmap.c1019
-rw-r--r--fs/ceph/osdmap.h125
-rw-r--r--fs/ceph/pagelist.c54
-rw-r--r--fs/ceph/pagelist.h54
-rw-r--r--fs/ceph/rados.h374
-rw-r--r--fs/ceph/snap.c904
-rw-r--r--fs/ceph/super.c1030
-rw-r--r--fs/ceph/super.h901
-rw-r--r--fs/ceph/types.h29
-rw-r--r--fs/ceph/xattr.c844
-rw-r--r--fs/cifs/cifsfs.c3
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/cifsproto.h6
-rw-r--r--fs/cifs/cifssmb.c135
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c7
-rw-r--r--fs/cifs/inode.c297
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/nfs/callback_xdr.c1
-rw-r--r--fs/nfs/delegation.h6
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/nfs4proc.c1
-rw-r--r--fs/nfs/pagelist.c23
-rw-r--r--fs/nfs/super.c25
-rw-r--r--fs/ntfs/super.c25
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c13
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c81
-rw-r--r--include/linux/amba/clcd.h33
-rw-r--r--include/linux/backlight.h11
-rw-r--r--include/linux/ftrace_event.h23
-rw-r--r--include/linux/if_tunnel.h1
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/of.h2
-rw-r--r--include/linux/perf_event.h59
-rw-r--r--include/linux/serial_sci.h4
-rw-r--r--include/linux/skbuff.h6
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/spi/l4f00242t03.h31
-rw-r--r--include/linux/syscalls.h26
-rw-r--r--include/linux/tty.h10
-rw-r--r--include/linux/usb.h18
-rw-r--r--include/linux/vt.h3
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/trace/ftrace.h44
-rw-r--r--include/trace/syscall.h8
-rw-r--r--ipc/syscall.c2
-rw-r--r--kernel/kprobes.c3
-rw-r--r--kernel/lockdep.c9
-rw-r--r--kernel/perf_event.c84
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)50
-rw-r--r--kernel/trace/trace_events.c2
-rw-r--r--kernel/trace/trace_kprobe.c29
-rw-r--r--kernel/trace/trace_syscalls.c72
-rw-r--r--mm/page_cgroup.c20
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_dev.c71
-rw-r--r--net/bluetooth/hci_sysfs.c3
-rw-r--r--net/bluetooth/l2cap.c51
-rw-r--r--net/bluetooth/rfcomm/core.c41
-rw-r--r--net/bluetooth/rfcomm/sock.c41
-rw-r--r--net/bluetooth/sco.c41
-rw-r--r--net/can/bcm.c3
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/ieee802154/af_ieee802154.c3
-rw-r--r--net/ipv4/af_inet.c5
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ipmr.c11
-rw-r--r--net/ipv4/route.c24
-rw-r--r--net/ipv4/tcp.c1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6mr.c11
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c2
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/key/af_key.c8
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/tx.c6
-rw-r--r--net/mac80211/util.c18
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netlabel/netlabel_domainhash.c28
-rw-r--r--net/netlabel/netlabel_unlabeled.c66
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/rxrpc/ar-accept.c6
-rw-r--r--net/sched/Kconfig5
-rw-r--r--net/sched/cls_cgroup.c36
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/xprtsock.c8
-rw-r--r--net/wireless/reg.c12
-rw-r--r--sound/oss/sequencer.c2
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_intel.c9
-rw-r--r--sound/pci/hda/patch_realtek.c6
-rw-r--r--tools/perf/Documentation/Makefile4
-rw-r--r--tools/perf/Makefile6
-rw-r--r--tools/perf/builtin-annotate.c6
-rw-r--r--tools/perf/builtin-diff.c13
-rw-r--r--tools/perf/builtin-record.c13
-rw-r--r--tools/perf/builtin-report.c112
-rw-r--r--tools/perf/builtin-stat.c10
-rw-r--r--tools/perf/builtin-top.c9
-rw-r--r--tools/perf/util/cpumap.c59
-rw-r--r--tools/perf/util/cpumap.h7
-rw-r--r--tools/perf/util/event.h9
-rw-r--r--tools/perf/util/hist.c50
-rw-r--r--tools/perf/util/hist.h12
-rw-r--r--tools/perf/util/probe-finder.c2
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/session.h1
-rw-r--r--tools/perf/util/thread.c41
-rw-r--r--tools/perf/util/thread.h3
780 files changed, 71698 insertions, 8445 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index a986e9bbba3d..bcebb9eaedce 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -160,7 +160,7 @@ Description:
160 match the driver to the device. For example: 160 match the driver to the device. For example:
161 # echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id 161 # echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
162 162
163What: /sys/bus/usb/device/.../avoid_reset 163What: /sys/bus/usb/device/.../avoid_reset_quirk
164Date: December 2009 164Date: December 2009
165Contact: Oliver Neukum <oliver@neukum.org> 165Contact: Oliver Neukum <oliver@neukum.org>
166Description: 166Description:
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
new file mode 100644
index 000000000000..6e03917316bd
--- /dev/null
+++ b/Documentation/filesystems/ceph.txt
@@ -0,0 +1,139 @@
1Ceph Distributed File System
2============================
3
4Ceph is a distributed network file system designed to provide good
5performance, reliability, and scalability.
6
7Basic features include:
8
9 * POSIX semantics
10 * Seamless scaling from 1 to many thousands of nodes
11 * High availability and reliability. No single points of failure.
12 * N-way replication of data across storage nodes
13 * Fast recovery from node failures
14 * Automatic rebalancing of data on node addition/removal
15 * Easy deployment: most FS components are userspace daemons
16
17Also,
18 * Flexible snapshots (on any directory)
19 * Recursive accounting (nested files, directories, bytes)
20
21In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely
22on symmetric access by all clients to shared block devices, Ceph
23separates data and metadata management into independent server
24clusters, similar to Lustre. Unlike Lustre, however, metadata and
25storage nodes run entirely as user space daemons. Storage nodes
26utilize btrfs to store data objects, leveraging its advanced features
27(checksumming, metadata replication, etc.). File data is striped
28across storage nodes in large chunks to distribute workload and
29facilitate high throughputs. When storage nodes fail, data is
30re-replicated in a distributed fashion by the storage nodes themselves
31(with some minimal coordination from a cluster monitor), making the
32system extremely efficient and scalable.
33
34Metadata servers effectively form a large, consistent, distributed
35in-memory cache above the file namespace that is extremely scalable,
36dynamically redistributes metadata in response to workload changes,
37and can tolerate arbitrary (well, non-Byzantine) node failures. The
38metadata server takes a somewhat unconventional approach to metadata
39storage to significantly improve performance for common workloads. In
40particular, inodes with only a single link are embedded in
41directories, allowing entire directories of dentries and inodes to be
42loaded into its cache with a single I/O operation. The contents of
43extremely large directories can be fragmented and managed by
44independent metadata servers, allowing scalable concurrent access.
45
46The system offers automatic data rebalancing/migration when scaling
47from a small cluster of just a few nodes to many hundreds, without
 48 requiring an administrator to carve the data set into static volumes or
49go through the tedious process of migrating data between servers.
50When the file system approaches full, new nodes can be easily added
51and things will "just work."
52
 53 Ceph includes a flexible snapshot mechanism that allows a user to create
54a snapshot on any subdirectory (and its nested contents) in the
55system. Snapshot creation and deletion are as simple as 'mkdir
56.snap/foo' and 'rmdir .snap/foo'.
57
58Ceph also provides some recursive accounting on directories for nested
59files and bytes. That is, a 'getfattr -d foo' on any directory in the
60system will reveal the total number of nested regular files and
61subdirectories, and a summation of all nested file sizes. This makes
62the identification of large disk space consumers relatively quick, as
63no 'du' or similar recursive scan of the file system is required.
64
65
66Mount Syntax
67============
68
69The basic mount syntax is:
70
71 # mount -t ceph monip[:port][,monip2[:port]...]:/[subdir] mnt
72
73You only need to specify a single monitor, as the client will get the
74full list when it connects. (However, if the monitor you specify
75happens to be down, the mount won't succeed.) The port can be left
76off if the monitor is using the default. So if the monitor is at
771.2.3.4,
78
79 # mount -t ceph 1.2.3.4:/ /mnt/ceph
80
81is sufficient. If /sbin/mount.ceph is installed, a hostname can be
82used instead of an IP address.
83
84
85
86Mount Options
87=============
88
89 ip=A.B.C.D[:N]
90 Specify the IP and/or port the client should bind to locally.
91 There is normally not much reason to do this. If the IP is not
92 specified, the client's IP address is determined by looking at the
 93 address its connection to the monitor originates from.
94
95 wsize=X
96 Specify the maximum write size in bytes. By default there is no
 97 maximum. Ceph will normally size writes based on the file stripe
98 size.
99
100 rsize=X
101 Specify the maximum readahead.
102
103 mount_timeout=X
104 Specify the timeout value for mount (in seconds), in the case
105 of a non-responsive Ceph file system. The default is 30
106 seconds.
107
108 rbytes
109 When stat() is called on a directory, set st_size to 'rbytes',
110 the summation of file sizes over all files nested beneath that
111 directory. This is the default.
112
113 norbytes
114 When stat() is called on a directory, set st_size to the
115 number of entries in that directory.
116
117 nocrc
118 Disable CRC32C calculation for data writes. If set, the OSD
119 must rely on TCP's error correction to detect data corruption
120 in the data payload.
121
122 noasyncreaddir
123 Disable the client's use of its local cache to satisfy readdir
124 requests. (This does not change correctness; the client uses
125 cached metadata only when a lease or capability ensures it is
126 valid.)
127
128
129More Information
130================
131
132For more information on Ceph, see the home page at
133 http://ceph.newdream.net/
134
135The Linux kernel client source tree is available at
136 git://ceph.newdream.net/linux-ceph-client.git
137
138and the source for the full system is at
139 git://ceph.newdream.net/ceph.git
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 35c9b51d20ea..dd5806f4fcc4 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -291,6 +291,7 @@ Code Seq#(hex) Include File Comments
2910x92 00-0F drivers/usb/mon/mon_bin.c 2910x92 00-0F drivers/usb/mon/mon_bin.c
2920x93 60-7F linux/auto_fs.h 2920x93 60-7F linux/auto_fs.h
2930x94 all fs/btrfs/ioctl.h 2930x94 all fs/btrfs/ioctl.h
2940x97 00-7F fs/ceph/ioctl.h Ceph file system
2940x99 00-0F 537-Addinboard driver 2950x99 00-0F 537-Addinboard driver
295 <mailto:buk@buks.ipn.de> 296 <mailto:buk@buks.ipn.de>
2960xA0 all linux/sdp/sdp.h Industrial Device Project 2970xA0 all linux/sdp/sdp.h Industrial Device Project
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index bdb13817e1e9..3ab2472509cb 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -59,37 +59,56 @@ nice to have in other objects. The C language does not allow for the
59direct expression of inheritance, so other techniques - such as structure 59direct expression of inheritance, so other techniques - such as structure
60embedding - must be used. 60embedding - must be used.
61 61
62So, for example, the UIO code has a structure that defines the memory 62(As an aside, for those familiar with the kernel linked list implementation,
63region associated with a uio device: 63this is analogous as to how "list_head" structs are rarely useful on
64their own, but are invariably found embedded in the larger objects of
65interest.)
64 66
65struct uio_mem { 67So, for example, the UIO code in drivers/uio/uio.c has a structure that
68defines the memory region associated with a uio device:
69
70 struct uio_map {
66 struct kobject kobj; 71 struct kobject kobj;
67 unsigned long addr; 72 struct uio_mem *mem;
68 unsigned long size; 73 };
69 int memtype;
70 void __iomem *internal_addr;
71};
72 74
73If you have a struct uio_mem structure, finding its embedded kobject is 75If you have a struct uio_map structure, finding its embedded kobject is
74just a matter of using the kobj member. Code that works with kobjects will 76just a matter of using the kobj member. Code that works with kobjects will
75often have the opposite problem, however: given a struct kobject pointer, 77often have the opposite problem, however: given a struct kobject pointer,
76what is the pointer to the containing structure? You must avoid tricks 78what is the pointer to the containing structure? You must avoid tricks
77(such as assuming that the kobject is at the beginning of the structure) 79(such as assuming that the kobject is at the beginning of the structure)
78and, instead, use the container_of() macro, found in <linux/kernel.h>: 80and, instead, use the container_of() macro, found in <linux/kernel.h>:
79 81
80 container_of(pointer, type, member) 82 container_of(pointer, type, member)
83
84where:
85
86 * "pointer" is the pointer to the embedded kobject,
87 * "type" is the type of the containing structure, and
88 * "member" is the name of the structure field to which "pointer" points.
89
90The return value from container_of() is a pointer to the corresponding
91container type. So, for example, a pointer "kp" to a struct kobject
92embedded *within* a struct uio_map could be converted to a pointer to the
93*containing* uio_map structure with:
94
95 struct uio_map *u_map = container_of(kp, struct uio_map, kobj);
96
97For convenience, programmers often define a simple macro for "back-casting"
98kobject pointers to the containing type. Exactly this happens in the
99earlier drivers/uio/uio.c, as you can see here:
100
101 struct uio_map {
102 struct kobject kobj;
103 struct uio_mem *mem;
104 };
81 105
82where pointer is the pointer to the embedded kobject, type is the type of 106 #define to_map(map) container_of(map, struct uio_map, kobj)
83the containing structure, and member is the name of the structure field to
84which pointer points. The return value from container_of() is a pointer to
85the given type. So, for example, a pointer "kp" to a struct kobject
86embedded within a struct uio_mem could be converted to a pointer to the
87containing uio_mem structure with:
88 107
89 struct uio_mem *u_mem = container_of(kp, struct uio_mem, kobj); 108where the macro argument "map" is a pointer to the struct kobject in
109question. That macro is subsequently invoked with:
90 110
91Programmers often define a simple macro for "back-casting" kobject pointers 111 struct uio_map *map = to_map(kobj);
92to the containing type.
93 112
94 113
95Initialization of kobjects 114Initialization of kobjects
@@ -387,4 +406,5 @@ called, and the objects in the former circle release each other.
387Example code to copy from 406Example code to copy from
388 407
389For a more complete example of using ksets and kobjects properly, see the 408For a more complete example of using ksets and kobjects properly, see the
390sample/kobject/kset-example.c code. 409example programs samples/kobject/{kobject-example.c,kset-example.c},
410which will be built as loadable modules if you select CONFIG_SAMPLE_KOBJECT.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
new file mode 100644
index 000000000000..7ee770b5ef5f
--- /dev/null
+++ b/Documentation/networking/stmmac.txt
@@ -0,0 +1,143 @@
1 STMicroelectronics 10/100/1000 Synopsys Ethernet driver
2
3Copyright (C) 2007-2010 STMicroelectronics Ltd
4Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
5
6This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
7(Synopsys IP blocks); it has been fully tested on STLinux platforms.
8
9Currently this network device driver is for all STM embedded MAC/GMAC
10(7xxx SoCs).
11
12DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
13Universal version 4.0 have been used for developing the first code
14implementation.
15
16Please, for more information also visit: www.stlinux.com
17
181) Kernel Configuration
19The kernel configuration option is STMMAC_ETH:
20 Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) --->
21 STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH)
22
232) Driver parameters list:
24 debug: message level (0: no output, 16: all);
25 phyaddr: to manually provide the physical address to the PHY device;
26 dma_rxsize: DMA rx ring size;
27 dma_txsize: DMA tx ring size;
28 buf_sz: DMA buffer size;
29 tc: control the HW FIFO threshold;
30 tx_coe: Enable/Disable Tx Checksum Offload engine;
31 watchdog: transmit timeout (in milliseconds);
32 flow_ctrl: Flow control ability [on/off];
33 pause: Flow Control Pause Time;
34 tmrate: timer period (only if timer optimisation is configured).
35
363) Command line options
37Driver parameters can be also passed in command line by using:
38 stmmaceth=dma_rxsize:128,dma_txsize:512
39
404) Driver information and notes
41
424.1) Transmit process
43The xmit method is invoked when the kernel needs to transmit a packet; it sets
44the descriptors in the ring and informs the DMA engine that there is a packet
45ready to be transmitted.
46Once the controller has finished transmitting the packet, an interrupt is
 47 triggered, so the driver will be able to release the socket buffers.
48By default, the driver sets the NETIF_F_SG bit in the features field of the
49net_device structure enabling the scatter/gather feature.
50
514.2) Receive process
52When one or more packets are received, an interrupt happens. The interrupts
53are not queued so the driver has to scan all the descriptors in the ring during
54the receive process.
55This is based on NAPI so the interrupt handler signals only if there is work to be
56done, and it exits.
57Then the poll method will be scheduled at some future point.
58The incoming packets are stored, by the DMA, in a list of pre-allocated socket
59buffers in order to avoid the memcpy (Zero-copy).
60
614.3) Timer-Driver Interrupt
 62 Instead of having the device asynchronously notify the driver of frame receptions, the
63driver configures a timer to generate an interrupt at regular intervals.
64Based on the granularity of the timer, the frames that are received by the device
65will experience different levels of latency. Some NICs have dedicated timer
66device to perform this task. STMMAC can use either the RTC device or the TMU
67channel 2 on STLinux platforms.
 68 The timer's frequency can be passed to the driver as a parameter; when changing it,
69take care of both hardware capability and network stability/performance impact.
 70 Several performance tests on STM platforms showed that this optimisation spares
71the CPU while having the maximum throughput.
72
734.4) WOL
74Wake up on Lan feature through Magic Frame is only supported for the GMAC
75core.
76
774.5) DMA descriptors
78Driver handles both normal and enhanced descriptors. The latter has been only
79tested on DWC Ether MAC 10/100/1000 Universal version 3.41a.
80
814.6) Ethtool support
82Ethtool is supported. Driver statistics and internal errors can be taken using:
83ethtool -S ethX command. It is possible to dump registers etc.
84
854.7) Jumbo and Segmentation Offloading
86Jumbo frames are supported and tested for the GMAC.
87The GSO has been also added but it's performed in software.
88LRO is not supported.
89
904.8) Physical
91The driver is compatible with PAL to work with PHY and GPHY devices.
92
934.9) Platform information
 94 Several pieces of information come from the platform; please refer to the
95driver's Header file in include/linux directory.
96
97struct plat_stmmacenet_data {
98 int bus_id;
99 int pbl;
100 int has_gmac;
101 void (*fix_mac_speed)(void *priv, unsigned int speed);
102 void (*bus_setup)(unsigned long ioaddr);
103#ifdef CONFIG_STM_DRIVERS
104 struct stm_pad_config *pad_config;
105#endif
106 void *bsp_priv;
107};
108
109Where:
110- pbl (Programmable Burst Length) is the maximum number of
111 beats to be transferred in one DMA transaction.
112 GMAC also enables the 4xPBL by default.
113- fix_mac_speed and bus_setup are used to configure internal target
114 registers (on STM platforms);
115- has_gmac: GMAC core is on board (get it at run-time in the next step);
116- bus_id: bus identifier.
117
118struct plat_stmmacphy_data {
119 int bus_id;
120 int phy_addr;
121 unsigned int phy_mask;
122 int interface;
123 int (*phy_reset)(void *priv);
124 void *priv;
125};
126
127Where:
128- bus_id: bus identifier;
129- phy_addr: physical address used for the attached phy device;
130 set it to -1 to get it at run-time;
131- interface: physical MII interface mode;
132- phy_reset: hook to reset HW function.
133
134TODO:
135- Continue to make the driver more generic and suitable for other Synopsys
136 Ethernet controllers used on other architectures (i.e. ARM).
137- 10G controllers are not supported.
138- MAC uses Normal descriptors and GMAC uses enhanced ones.
139 This is a limit that should be reviewed. MAC could want to
140 use the enhanced structure.
141- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
142- Review the timer optimisation code to use an embedded device that seems to be
143 available in new chip generations.
diff --git a/MAINTAINERS b/MAINTAINERS
index 704d3d6da1b1..2c569f506498 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1441,6 +1441,15 @@ F: arch/powerpc/include/asm/spu*.h
1441F: arch/powerpc/oprofile/*cell* 1441F: arch/powerpc/oprofile/*cell*
1442F: arch/powerpc/platforms/cell/ 1442F: arch/powerpc/platforms/cell/
1443 1443
1444CEPH DISTRIBUTED FILE SYSTEM CLIENT
1445M: Sage Weil <sage@newdream.net>
1446L: ceph-devel@lists.sourceforge.net
1447W: http://ceph.newdream.net/
1448T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
1449S: Supported
1450F: Documentation/filesystems/ceph.txt
1451F: fs/ceph
1452
1444CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: 1453CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
1445M: David Vrabel <david.vrabel@csr.com> 1454M: David Vrabel <david.vrabel@csr.com>
1446L: linux-usb@vger.kernel.org 1455L: linux-usb@vger.kernel.org
@@ -3074,6 +3083,7 @@ F: include/scsi/*iscsi*
3074ISDN SUBSYSTEM 3083ISDN SUBSYSTEM
3075M: Karsten Keil <isdn@linux-pingi.de> 3084M: Karsten Keil <isdn@linux-pingi.de>
3076L: isdn4linux@listserv.isdn4linux.de (subscribers-only) 3085L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
3086L: netdev@vger.kernel.org
3077W: http://www.isdn4linux.de 3087W: http://www.isdn4linux.de
3078T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git 3088T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
3079S: Maintained 3089S: Maintained
@@ -5213,6 +5223,21 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
5213S: Maintained 5223S: Maintained
5214F: arch/sparc/ 5224F: arch/sparc/
5215 5225
5226SPARC SERIAL DRIVERS
5227M: "David S. Miller" <davem@davemloft.net>
5228L: sparclinux@vger.kernel.org
5229T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
5230T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
5231S: Maintained
5232F: drivers/serial/suncore.c
5233F: drivers/serial/suncore.h
5234F: drivers/serial/sunhv.c
5235F: drivers/serial/sunsab.c
5236F: drivers/serial/sunsab.h
5237F: drivers/serial/sunsu.c
5238F: drivers/serial/sunzilog.c
5239F: drivers/serial/sunzilog.h
5240
5216SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER 5241SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
5217M: Roger Wolff <R.E.Wolff@BitWizard.nl> 5242M: Roger Wolff <R.E.Wolff@BitWizard.nl>
5218S: Supported 5243S: Supported
@@ -5398,7 +5423,6 @@ S: Maintained
5398F: sound/soc/codecs/twl4030* 5423F: sound/soc/codecs/twl4030*
5399 5424
5400TIPC NETWORK LAYER 5425TIPC NETWORK LAYER
5401M: Per Liden <per.liden@ericsson.com>
5402M: Jon Maloy <jon.maloy@ericsson.com> 5426M: Jon Maloy <jon.maloy@ericsson.com>
5403M: Allan Stephens <allan.stephens@windriver.com> 5427M: Allan Stephens <allan.stephens@windriver.com>
5404L: tipc-discussion@lists.sourceforge.net 5428L: tipc-discussion@lists.sourceforge.net
diff --git a/Makefile b/Makefile
index 08ff02da7ce3..a5ba759e0fd5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 34 3SUBLEVEL = 34
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc2
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h
index 30d55fe7aaf6..dad300fa14ce 100644
--- a/arch/alpha/include/asm/core_marvel.h
+++ b/arch/alpha/include/asm/core_marvel.h
@@ -12,7 +12,6 @@
12#define __ALPHA_MARVEL__H__ 12#define __ALPHA_MARVEL__H__
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/pci.h>
16#include <linux/spinlock.h> 15#include <linux/spinlock.h>
17 16
18#include <asm/compiler.h> 17#include <asm/compiler.h>
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h
index acf55b483472..21ac53383b37 100644
--- a/arch/alpha/include/asm/core_mcpcia.h
+++ b/arch/alpha/include/asm/core_mcpcia.h
@@ -6,7 +6,6 @@
6#define MCPCIA_ONE_HAE_WINDOW 1 6#define MCPCIA_ONE_HAE_WINDOW 1
7 7
8#include <linux/types.h> 8#include <linux/types.h>
9#include <linux/pci.h>
10#include <asm/compiler.h> 9#include <asm/compiler.h>
11 10
12/* 11/*
diff --git a/arch/alpha/include/asm/core_titan.h b/arch/alpha/include/asm/core_titan.h
index a17f6f33b68e..8cf79d1219e1 100644
--- a/arch/alpha/include/asm/core_titan.h
+++ b/arch/alpha/include/asm/core_titan.h
@@ -2,7 +2,6 @@
2#define __ALPHA_TITAN__H__ 2#define __ALPHA_TITAN__H__
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/pci.h>
6#include <asm/compiler.h> 5#include <asm/compiler.h>
7 6
8/* 7/*
diff --git a/arch/alpha/include/asm/core_tsunami.h b/arch/alpha/include/asm/core_tsunami.h
index 58d4fe48742c..8e39ecf09419 100644
--- a/arch/alpha/include/asm/core_tsunami.h
+++ b/arch/alpha/include/asm/core_tsunami.h
@@ -2,7 +2,6 @@
2#define __ALPHA_TSUNAMI__H__ 2#define __ALPHA_TSUNAMI__H__
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/pci.h>
6#include <asm/compiler.h> 5#include <asm/compiler.h>
7 6
8/* 7/*
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index d64e1e497e76..4026502ab707 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -224,7 +224,7 @@ static void
224dp264_device_interrupt(unsigned long vector) 224dp264_device_interrupt(unsigned long vector)
225{ 225{
226#if 1 226#if 1
227 printk("dp264_device_interrupt: NOT IMPLEMENTED YET!! \n"); 227 printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
228#else 228#else
229 unsigned long pld; 229 unsigned long pld;
230 unsigned int i; 230 unsigned int i;
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 288053342c83..9008d0f20c53 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -171,7 +171,7 @@ titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
171static void 171static void
172titan_device_interrupt(unsigned long vector) 172titan_device_interrupt(unsigned long vector)
173{ 173{
174 printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n"); 174 printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n");
175} 175}
176 176
177static void 177static void
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 6ee7655b7568..b14f015008ad 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/kallsyms.h> 19#include <linux/kallsyms.h>
20#include <linux/ratelimit.h>
20 21
21#include <asm/gentrap.h> 22#include <asm/gentrap.h>
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -771,8 +772,7 @@ asmlinkage void
771do_entUnaUser(void __user * va, unsigned long opcode, 772do_entUnaUser(void __user * va, unsigned long opcode,
772 unsigned long reg, struct pt_regs *regs) 773 unsigned long reg, struct pt_regs *regs)
773{ 774{
774 static int cnt = 0; 775 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
775 static unsigned long last_time;
776 776
777 unsigned long tmp1, tmp2, tmp3, tmp4; 777 unsigned long tmp1, tmp2, tmp3, tmp4;
778 unsigned long fake_reg, *reg_addr = &fake_reg; 778 unsigned long fake_reg, *reg_addr = &fake_reg;
@@ -783,15 +783,11 @@ do_entUnaUser(void __user * va, unsigned long opcode,
783 with the unaliged access. */ 783 with the unaliged access. */
784 784
785 if (!test_thread_flag (TIF_UAC_NOPRINT)) { 785 if (!test_thread_flag (TIF_UAC_NOPRINT)) {
786 if (cnt >= 5 && time_after(jiffies, last_time + 5 * HZ)) { 786 if (__ratelimit(&ratelimit)) {
787 cnt = 0;
788 }
789 if (++cnt < 5) {
790 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", 787 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
791 current->comm, task_pid_nr(current), 788 current->comm, task_pid_nr(current),
792 regs->pc - 4, va, opcode, reg); 789 regs->pc - 4, va, opcode, reg);
793 } 790 }
794 last_time = jiffies;
795 } 791 }
796 if (test_thread_flag (TIF_UAC_SIGBUS)) 792 if (test_thread_flag (TIF_UAC_SIGBUS))
797 goto give_sigbus; 793 goto give_sigbus;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cadfe2ee66a5..c5408bf1bf43 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -218,6 +218,10 @@ config MMU
218 Select if you want MMU-based virtualised addressing space 218 Select if you want MMU-based virtualised addressing space
219 support by paged memory management. If unsure, say 'Y'. 219 support by paged memory management. If unsure, say 'Y'.
220 220
221#
222# The "ARM system type" choice list is ordered alphabetically by option
223# text. Please add new entries in the option alphabetic order.
224#
221choice 225choice
222 prompt "ARM system type" 226 prompt "ARM system type"
223 default ARCH_VERSATILE 227 default ARCH_VERSATILE
@@ -274,6 +278,18 @@ config ARCH_AT91
274 This enables support for systems based on the Atmel AT91RM9200, 278 This enables support for systems based on the Atmel AT91RM9200,
275 AT91SAM9 and AT91CAP9 processors. 279 AT91SAM9 and AT91CAP9 processors.
276 280
281config ARCH_BCMRING
282 bool "Broadcom BCMRING"
283 depends on MMU
284 select CPU_V6
285 select ARM_AMBA
286 select COMMON_CLKDEV
287 select GENERIC_TIME
288 select GENERIC_CLOCKEVENTS
289 select ARCH_WANT_OPTIONAL_GPIOLIB
290 help
291 Support for Broadcom's BCMRing platform.
292
277config ARCH_CLPS711X 293config ARCH_CLPS711X
278 bool "Cirrus Logic CLPS711x/EP721x-based" 294 bool "Cirrus Logic CLPS711x/EP721x-based"
279 select CPU_ARM720T 295 select CPU_ARM720T
@@ -359,20 +375,6 @@ config ARCH_H720X
359 help 375 help
360 This enables support for systems based on the Hynix HMS720x 376 This enables support for systems based on the Hynix HMS720x
361 377
362config ARCH_NOMADIK
363 bool "STMicroelectronics Nomadik"
364 select ARM_AMBA
365 select ARM_VIC
366 select CPU_ARM926T
367 select HAVE_CLK
368 select COMMON_CLKDEV
369 select GENERIC_TIME
370 select GENERIC_CLOCKEVENTS
371 select GENERIC_GPIO
372 select ARCH_REQUIRE_GPIOLIB
373 help
374 Support for the Nomadik platform by ST-Ericsson
375
376config ARCH_IOP13XX 378config ARCH_IOP13XX
377 bool "IOP13xx-based" 379 bool "IOP13xx-based"
378 depends on MMU 380 depends on MMU
@@ -747,6 +749,30 @@ config ARCH_U300
747 help 749 help
748 Support for ST-Ericsson U300 series mobile platforms. 750 Support for ST-Ericsson U300 series mobile platforms.
749 751
752config ARCH_U8500
753 bool "ST-Ericsson U8500 Series"
754 select CPU_V7
755 select ARM_AMBA
756 select GENERIC_TIME
757 select GENERIC_CLOCKEVENTS
758 select COMMON_CLKDEV
759 help
760 Support for ST-Ericsson's Ux500 architecture
761
762config ARCH_NOMADIK
763 bool "STMicroelectronics Nomadik"
764 select ARM_AMBA
765 select ARM_VIC
766 select CPU_ARM926T
767 select HAVE_CLK
768 select COMMON_CLKDEV
769 select GENERIC_TIME
770 select GENERIC_CLOCKEVENTS
771 select GENERIC_GPIO
772 select ARCH_REQUIRE_GPIOLIB
773 help
774 Support for the Nomadik platform by ST-Ericsson
775
750config ARCH_DAVINCI 776config ARCH_DAVINCI
751 bool "TI DaVinci" 777 bool "TI DaVinci"
752 select CPU_ARM926T 778 select CPU_ARM926T
@@ -775,30 +801,13 @@ config ARCH_OMAP
775 help 801 help
776 Support for TI's OMAP platform (OMAP1 and OMAP2). 802 Support for TI's OMAP platform (OMAP1 and OMAP2).
777 803
778config ARCH_BCMRING
779 bool "Broadcom BCMRING"
780 depends on MMU
781 select CPU_V6
782 select ARM_AMBA
783 select COMMON_CLKDEV
784 select GENERIC_TIME
785 select GENERIC_CLOCKEVENTS
786 select ARCH_WANT_OPTIONAL_GPIOLIB
787 help
788 Support for Broadcom's BCMRing platform.
789
790config ARCH_U8500
791 bool "ST-Ericsson U8500 Series"
792 select CPU_V7
793 select ARM_AMBA
794 select GENERIC_TIME
795 select GENERIC_CLOCKEVENTS
796 select COMMON_CLKDEV
797 help
798 Support for ST-Ericsson's Ux500 architecture
799
800endchoice 804endchoice
801 805
806#
807# This is sorted alphabetically by mach-* pathname. However, plat-*
808# Kconfigs may be included either alphabetically (according to the
809# plat- suffix) or along side the corresponding mach-* source.
810#
802source "arch/arm/mach-aaec2000/Kconfig" 811source "arch/arm/mach-aaec2000/Kconfig"
803 812
804source "arch/arm/mach-at91/Kconfig" 813source "arch/arm/mach-at91/Kconfig"
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 0da382f33157..9c097073ce4c 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -11,6 +11,7 @@ extern unsigned long free_mem_end_ptr;
11extern void error(char *); 11extern void error(char *);
12 12
13#define STATIC static 13#define STATIC static
14#define STATIC_RW_DATA /* non-static please */
14 15
15#define ARCH_HAS_DECOMP_WDOG 16#define ARCH_HAS_DECOMP_WDOG
16 17
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 535a91daaa53..0f23009170a1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -742,7 +742,7 @@ proc_types:
742 .word 0x000f0000 742 .word 0x000f0000
743 W(b) __armv4_mmu_cache_on 743 W(b) __armv4_mmu_cache_on
744 W(b) __armv4_mmu_cache_off 744 W(b) __armv4_mmu_cache_off
745 W(b) __armv4_mmu_cache_flush 745 W(b) __armv5tej_mmu_cache_flush
746 746
747 .word 0x0007b000 @ ARMv6 747 .word 0x0007b000 @ ARMv6
748 .word 0x000ff000 748 .word 0x000ff000
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index d32bc71c1f78..d2b2ef41cd4f 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -33,6 +33,7 @@ unsigned int __machine_arch_type;
33#else 33#else
34 34
35static void putstr(const char *ptr); 35static void putstr(const char *ptr);
36extern void error(char *x);
36 37
37#include <mach/uncompress.h> 38#include <mach/uncompress.h>
38 39
diff --git a/arch/arm/configs/cm_t35_defconfig b/arch/arm/configs/cm_t35_defconfig
index 893cd267e075..032b49bad91f 100644
--- a/arch/arm/configs/cm_t35_defconfig
+++ b/arch/arm/configs/cm_t35_defconfig
@@ -358,7 +358,7 @@ CONFIG_PM_SLEEP=y
358CONFIG_SUSPEND=y 358CONFIG_SUSPEND=y
359CONFIG_SUSPEND_FREEZER=y 359CONFIG_SUSPEND_FREEZER=y
360# CONFIG_APM_EMULATION is not set 360# CONFIG_APM_EMULATION is not set
361# CONFIG_PM_RUNTIME is not set 361CONFIG_PM_RUNTIME=y
362CONFIG_ARCH_SUSPEND_POSSIBLE=y 362CONFIG_ARCH_SUSPEND_POSSIBLE=y
363CONFIG_NET=y 363CONFIG_NET=y
364 364
diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig
index 75cae18fbcb6..de0c28aa43e7 100644
--- a/arch/arm/configs/n770_defconfig
+++ b/arch/arm/configs/n770_defconfig
@@ -308,6 +308,7 @@ CONFIG_PM_SLEEP=y
308CONFIG_SUSPEND_UP_POSSIBLE=y 308CONFIG_SUSPEND_UP_POSSIBLE=y
309CONFIG_SUSPEND=y 309CONFIG_SUSPEND=y
310# CONFIG_APM_EMULATION is not set 310# CONFIG_APM_EMULATION is not set
311CONFIG_PM_RUNTIME=y
311 312
312# 313#
313# Networking 314# Networking
diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig
index e6f667c5e58a..216ad00948af 100644
--- a/arch/arm/configs/n8x0_defconfig
+++ b/arch/arm/configs/n8x0_defconfig
@@ -191,6 +191,7 @@ CONFIG_ARCH_OMAP=y
191# 191#
192CONFIG_ARCH_OMAP_OTG=y 192CONFIG_ARCH_OMAP_OTG=y
193# CONFIG_ARCH_OMAP1 is not set 193# CONFIG_ARCH_OMAP1 is not set
194CONFIG_ARCH_OMAP2PLUS=y
194CONFIG_ARCH_OMAP2=y 195CONFIG_ARCH_OMAP2=y
195# CONFIG_ARCH_OMAP3 is not set 196# CONFIG_ARCH_OMAP3 is not set
196# CONFIG_ARCH_OMAP4 is not set 197# CONFIG_ARCH_OMAP4 is not set
@@ -198,8 +199,6 @@ CONFIG_ARCH_OMAP2=y
198# 199#
199# OMAP Feature Selections 200# OMAP Feature Selections
200# 201#
201# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
202# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
203CONFIG_OMAP_RESET_CLOCKS=y 202CONFIG_OMAP_RESET_CLOCKS=y
204# CONFIG_OMAP_MUX is not set 203# CONFIG_OMAP_MUX is not set
205# CONFIG_OMAP_MCBSP is not set 204# CONFIG_OMAP_MCBSP is not set
@@ -208,15 +207,13 @@ CONFIG_OMAP_MBOX_FWK=y
208CONFIG_OMAP_32K_TIMER=y 207CONFIG_OMAP_32K_TIMER=y
209CONFIG_OMAP_32K_TIMER_HZ=128 208CONFIG_OMAP_32K_TIMER_HZ=128
210CONFIG_OMAP_DM_TIMER=y 209CONFIG_OMAP_DM_TIMER=y
211# CONFIG_OMAP_LL_DEBUG_UART1 is not set 210# CONFIG_OMAP_PM_NONE is not set
212# CONFIG_OMAP_LL_DEBUG_UART2 is not set 211CONFIG_OMAP_PM_NOOP=y
213CONFIG_OMAP_LL_DEBUG_UART3=y
214# CONFIG_MACH_OMAP_GENERIC is not set 212# CONFIG_MACH_OMAP_GENERIC is not set
215 213
216# 214#
217# OMAP Core Type 215# OMAP Core Type
218# 216#
219CONFIG_ARCH_OMAP24XX=y
220CONFIG_ARCH_OMAP2420=y 217CONFIG_ARCH_OMAP2420=y
221# CONFIG_ARCH_OMAP2430 is not set 218# CONFIG_ARCH_OMAP2430 is not set
222 219
@@ -227,6 +224,9 @@ CONFIG_MACH_OMAP2_TUSB6010=y
227# CONFIG_MACH_OMAP_H4 is not set 224# CONFIG_MACH_OMAP_H4 is not set
228# CONFIG_MACH_OMAP_APOLLON is not set 225# CONFIG_MACH_OMAP_APOLLON is not set
229# CONFIG_MACH_OMAP_2430SDP is not set 226# CONFIG_MACH_OMAP_2430SDP is not set
227CONFIG_MACH_NOKIA_N800=y
228CONFIG_MACH_NOKIA_N810=y
229CONFIG_MACH_NOKIA_N810_WIMAX=y
230CONFIG_MACH_NOKIA_N8X0=y 230CONFIG_MACH_NOKIA_N8X0=y
231 231
232# 232#
@@ -303,7 +303,7 @@ CONFIG_ALIGNMENT_TRAP=y
303CONFIG_ZBOOT_ROM_TEXT=0x10C08000 303CONFIG_ZBOOT_ROM_TEXT=0x10C08000
304CONFIG_ZBOOT_ROM_BSS=0x10200000 304CONFIG_ZBOOT_ROM_BSS=0x10200000
305# CONFIG_ZBOOT_ROM is not set 305# CONFIG_ZBOOT_ROM is not set
306CONFIG_CMDLINE="root=1f03 rootfstype=jffs2 console=ttyS2,115200n8" 306CONFIG_CMDLINE="root=/dev/mmcblk0p2 console=ttyS2,115200n8 debug earlyprintk rootwait"
307# CONFIG_XIP_KERNEL is not set 307# CONFIG_XIP_KERNEL is not set
308# CONFIG_KEXEC is not set 308# CONFIG_KEXEC is not set
309 309
@@ -337,7 +337,14 @@ CONFIG_HAVE_AOUT=y
337# 337#
338# Power management options 338# Power management options
339# 339#
340# CONFIG_PM is not set 340CONFIG_PM=y
341# CONFIG_PM_DEBUG is not set
342CONFIG_PM_SLEEP=y
343CONFIG_SUSPEND=y
344CONFIG_SUSPEND_FREEZER=y
345# CONFIG_APM_EMULATION is not set
346CONFIG_PM_RUNTIME=y
347CONFIG_PM_OPS=y
341CONFIG_ARCH_SUSPEND_POSSIBLE=y 348CONFIG_ARCH_SUSPEND_POSSIBLE=y
342CONFIG_NET=y 349CONFIG_NET=y
343 350
@@ -617,7 +624,55 @@ CONFIG_UNIX98_PTYS=y
617# CONFIG_R3964 is not set 624# CONFIG_R3964 is not set
618# CONFIG_RAW_DRIVER is not set 625# CONFIG_RAW_DRIVER is not set
619# CONFIG_TCG_TPM is not set 626# CONFIG_TCG_TPM is not set
620# CONFIG_I2C is not set 627CONFIG_I2C=y
628CONFIG_I2C_BOARDINFO=y
629# CONFIG_I2C_COMPAT is not set
630# CONFIG_I2C_CHARDEV is not set
631# CONFIG_I2C_HELPER_AUTO is not set
632# CONFIG_I2C_SMBUS is not set
633
634#
635# I2C Algorithms
636#
637# CONFIG_I2C_ALGOBIT is not set
638# CONFIG_I2C_ALGOPCF is not set
639# CONFIG_I2C_ALGOPCA is not set
640
641#
642# I2C Hardware Bus support
643#
644
645#
646# I2C system bus drivers (mostly embedded / system-on-chip)
647#
648# CONFIG_I2C_DESIGNWARE is not set
649# CONFIG_I2C_GPIO is not set
650# CONFIG_I2C_OCORES is not set
651CONFIG_I2C_OMAP=y
652# CONFIG_I2C_SIMTEC is not set
653# CONFIG_I2C_XILINX is not set
654
655#
656# External I2C/SMBus adapter drivers
657#
658# CONFIG_I2C_PARPORT_LIGHT is not set
659# CONFIG_I2C_TAOS_EVM is not set
660# CONFIG_I2C_TINY_USB is not set
661
662#
663# Other I2C/SMBus bus drivers
664#
665# CONFIG_I2C_PCA_PLATFORM is not set
666# CONFIG_I2C_STUB is not set
667
668#
669# Miscellaneous I2C Chip support
670#
671# CONFIG_SENSORS_TSL2550 is not set
672# CONFIG_I2C_DEBUG_CORE is not set
673# CONFIG_I2C_DEBUG_ALGO is not set
674# CONFIG_I2C_DEBUG_BUS is not set
675# CONFIG_I2C_DEBUG_CHIP is not set
621CONFIG_SPI=y 676CONFIG_SPI=y
622# CONFIG_SPI_DEBUG is not set 677# CONFIG_SPI_DEBUG is not set
623CONFIG_SPI_MASTER=y 678CONFIG_SPI_MASTER=y
@@ -673,15 +728,44 @@ CONFIG_SSB_POSSIBLE=y
673# Multifunction device drivers 728# Multifunction device drivers
674# 729#
675# CONFIG_MFD_CORE is not set 730# CONFIG_MFD_CORE is not set
731# CONFIG_MFD_88PM860X is not set
676# CONFIG_MFD_SM501 is not set 732# CONFIG_MFD_SM501 is not set
677# CONFIG_MFD_ASIC3 is not set 733# CONFIG_MFD_ASIC3 is not set
678# CONFIG_HTC_EGPIO is not set 734# CONFIG_HTC_EGPIO is not set
679# CONFIG_HTC_PASIC3 is not set 735# CONFIG_HTC_PASIC3 is not set
736# CONFIG_HTC_I2CPLD is not set
737# CONFIG_TPS65010 is not set
738CONFIG_MENELAUS=y
739# CONFIG_TWL4030_CORE is not set
680# CONFIG_MFD_TMIO is not set 740# CONFIG_MFD_TMIO is not set
681# CONFIG_MFD_T7L66XB is not set 741# CONFIG_MFD_T7L66XB is not set
682# CONFIG_MFD_TC6387XB is not set 742# CONFIG_MFD_TC6387XB is not set
683# CONFIG_MFD_TC6393XB is not set 743# CONFIG_MFD_TC6393XB is not set
744# CONFIG_PMIC_DA903X is not set
745# CONFIG_PMIC_ADP5520 is not set
746# CONFIG_MFD_MAX8925 is not set
747# CONFIG_MFD_WM8400 is not set
748# CONFIG_MFD_WM831X is not set
749# CONFIG_MFD_WM8350_I2C is not set
750# CONFIG_MFD_WM8994 is not set
751# CONFIG_MFD_PCF50633 is not set
752# CONFIG_MFD_MC13783 is not set
753# CONFIG_AB3100_CORE is not set
684# CONFIG_EZX_PCAP is not set 754# CONFIG_EZX_PCAP is not set
755# CONFIG_AB4500_CORE is not set
756CONFIG_REGULATOR=y
757# CONFIG_REGULATOR_DEBUG is not set
758# CONFIG_REGULATOR_DUMMY is not set
759# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
760# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
761# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
762# CONFIG_REGULATOR_BQ24022 is not set
763# CONFIG_REGULATOR_MAX1586 is not set
764# CONFIG_REGULATOR_MAX8649 is not set
765# CONFIG_REGULATOR_MAX8660 is not set
766# CONFIG_REGULATOR_LP3971 is not set
767# CONFIG_REGULATOR_TPS65023 is not set
768# CONFIG_REGULATOR_TPS6507X is not set
685# CONFIG_MEDIA_SUPPORT is not set 769# CONFIG_MEDIA_SUPPORT is not set
686 770
687# 771#
@@ -718,7 +802,10 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
718CONFIG_USB_DEVICEFS=y 802CONFIG_USB_DEVICEFS=y
719CONFIG_USB_DEVICE_CLASS=y 803CONFIG_USB_DEVICE_CLASS=y
720# CONFIG_USB_DYNAMIC_MINORS is not set 804# CONFIG_USB_DYNAMIC_MINORS is not set
721# CONFIG_USB_OTG is not set 805CONFIG_USB_SUSPEND=y
806CONFIG_USB_OTG=y
807# CONFIG_USB_OTG_WHITELIST is not set
808# CONFIG_USB_OTG_BLACKLIST_HUB is not set
722# CONFIG_USB_MON is not set 809# CONFIG_USB_MON is not set
723# CONFIG_USB_WUSB is not set 810# CONFIG_USB_WUSB is not set
724# CONFIG_USB_WUSB_CBAF is not set 811# CONFIG_USB_WUSB_CBAF is not set
@@ -737,9 +824,10 @@ CONFIG_USB_DEVICE_CLASS=y
737CONFIG_USB_MUSB_HDRC=y 824CONFIG_USB_MUSB_HDRC=y
738CONFIG_USB_TUSB6010=y 825CONFIG_USB_TUSB6010=y
739# CONFIG_USB_MUSB_HOST is not set 826# CONFIG_USB_MUSB_HOST is not set
740CONFIG_USB_MUSB_PERIPHERAL=y 827# CONFIG_USB_MUSB_PERIPHERAL is not set
741# CONFIG_USB_MUSB_OTG is not set 828CONFIG_USB_MUSB_OTG=y
742CONFIG_USB_GADGET_MUSB_HDRC=y 829CONFIG_USB_GADGET_MUSB_HDRC=y
830CONFIG_USB_MUSB_HDRC_HCD=y
743# CONFIG_MUSB_PIO_ONLY is not set 831# CONFIG_MUSB_PIO_ONLY is not set
744# CONFIG_USB_INVENTRA_DMA is not set 832# CONFIG_USB_INVENTRA_DMA is not set
745# CONFIG_USB_TI_CPPI_DMA is not set 833# CONFIG_USB_TI_CPPI_DMA is not set
@@ -824,44 +912,77 @@ CONFIG_USB_GADGET_DUALSPEED=y
824# CONFIG_USB_ZERO is not set 912# CONFIG_USB_ZERO is not set
825# CONFIG_USB_AUDIO is not set 913# CONFIG_USB_AUDIO is not set
826CONFIG_USB_ETH=y 914CONFIG_USB_ETH=y
827# CONFIG_USB_ETH_RNDIS is not set 915CONFIG_USB_ETH_RNDIS=y
916CONFIG_USB_ETH_EEM=y
828# CONFIG_USB_GADGETFS is not set 917# CONFIG_USB_GADGETFS is not set
829# CONFIG_USB_FILE_STORAGE is not set 918# CONFIG_USB_FILE_STORAGE is not set
919# CONFIG_USB_MASS_STORAGE is not set
830# CONFIG_USB_G_SERIAL is not set 920# CONFIG_USB_G_SERIAL is not set
831# CONFIG_USB_MIDI_GADGET is not set 921# CONFIG_USB_MIDI_GADGET is not set
832# CONFIG_USB_G_PRINTER is not set 922# CONFIG_USB_G_PRINTER is not set
833# CONFIG_USB_CDC_COMPOSITE is not set 923# CONFIG_USB_CDC_COMPOSITE is not set
924# CONFIG_USB_G_NOKIA is not set
925# CONFIG_USB_G_MULTI is not set
834 926
835# 927#
836# OTG and related infrastructure 928# OTG and related infrastructure
837# 929#
838CONFIG_USB_OTG_UTILS=y 930CONFIG_USB_OTG_UTILS=y
839# CONFIG_USB_GPIO_VBUS is not set 931# CONFIG_USB_GPIO_VBUS is not set
932# CONFIG_ISP1301_OMAP is not set
933# CONFIG_USB_ULPI is not set
840CONFIG_NOP_USB_XCEIV=y 934CONFIG_NOP_USB_XCEIV=y
841# CONFIG_MMC is not set 935CONFIG_MMC=y
936# CONFIG_MMC_DEBUG is not set
937# CONFIG_MMC_UNSAFE_RESUME is not set
938
939#
940# MMC/SD/SDIO Card Drivers
941#
942CONFIG_MMC_BLOCK=y
943CONFIG_MMC_BLOCK_BOUNCE=y
944# CONFIG_SDIO_UART is not set
945# CONFIG_MMC_TEST is not set
946
947#
948# MMC/SD/SDIO Host Controller Drivers
949#
950# CONFIG_MMC_SDHCI is not set
951CONFIG_MMC_OMAP=y
952# CONFIG_MMC_SPI is not set
842# CONFIG_MEMSTICK is not set 953# CONFIG_MEMSTICK is not set
843# CONFIG_ACCESSIBILITY is not set
844# CONFIG_NEW_LEDS is not set 954# CONFIG_NEW_LEDS is not set
955# CONFIG_ACCESSIBILITY is not set
845CONFIG_RTC_LIB=y 956CONFIG_RTC_LIB=y
846# CONFIG_RTC_CLASS is not set 957# CONFIG_RTC_CLASS is not set
847# CONFIG_DMADEVICES is not set 958# CONFIG_DMADEVICES is not set
848# CONFIG_AUXDISPLAY is not set 959# CONFIG_AUXDISPLAY is not set
849# CONFIG_REGULATOR is not set
850# CONFIG_UIO is not set 960# CONFIG_UIO is not set
961
962#
963# TI VLYNQ
964#
851# CONFIG_STAGING is not set 965# CONFIG_STAGING is not set
852 966
853# 967#
854# File systems 968# File systems
855# 969#
856# CONFIG_EXT2_FS is not set 970# CONFIG_EXT2_FS is not set
857# CONFIG_EXT3_FS is not set 971CONFIG_EXT3_FS=y
972# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
973CONFIG_EXT3_FS_XATTR=y
974# CONFIG_EXT3_FS_POSIX_ACL is not set
975# CONFIG_EXT3_FS_SECURITY is not set
858# CONFIG_EXT4_FS is not set 976# CONFIG_EXT4_FS is not set
977CONFIG_JBD=y
978CONFIG_FS_MBCACHE=y
859# CONFIG_REISERFS_FS is not set 979# CONFIG_REISERFS_FS is not set
860# CONFIG_JFS_FS is not set 980# CONFIG_JFS_FS is not set
861# CONFIG_FS_POSIX_ACL is not set 981# CONFIG_FS_POSIX_ACL is not set
862# CONFIG_XFS_FS is not set 982# CONFIG_XFS_FS is not set
863# CONFIG_OCFS2_FS is not set 983# CONFIG_OCFS2_FS is not set
864# CONFIG_BTRFS_FS is not set 984# CONFIG_BTRFS_FS is not set
985# CONFIG_NILFS2_FS is not set
865CONFIG_FILE_LOCKING=y 986CONFIG_FILE_LOCKING=y
866CONFIG_FSNOTIFY=y 987CONFIG_FSNOTIFY=y
867CONFIG_DNOTIFY=y 988CONFIG_DNOTIFY=y
@@ -886,8 +1007,11 @@ CONFIG_INOTIFY_USER=y
886# 1007#
887# DOS/FAT/NT Filesystems 1008# DOS/FAT/NT Filesystems
888# 1009#
1010CONFIG_FAT_FS=y
889# CONFIG_MSDOS_FS is not set 1011# CONFIG_MSDOS_FS is not set
890# CONFIG_VFAT_FS is not set 1012CONFIG_VFAT_FS=y
1013CONFIG_FAT_DEFAULT_CODEPAGE=437
1014CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
891# CONFIG_NTFS_FS is not set 1015# CONFIG_NTFS_FS is not set
892 1016
893# 1017#
diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig
index c7999f5b1c9a..5a9e95fa728b 100644
--- a/arch/arm/configs/omap3_beagle_defconfig
+++ b/arch/arm/configs/omap3_beagle_defconfig
@@ -324,6 +324,7 @@ CONFIG_PM_SLEEP=y
324CONFIG_SUSPEND=y 324CONFIG_SUSPEND=y
325CONFIG_SUSPEND_FREEZER=y 325CONFIG_SUSPEND_FREEZER=y
326# CONFIG_APM_EMULATION is not set 326# CONFIG_APM_EMULATION is not set
327CONFIG_PM_RUNTIME=y
327CONFIG_ARCH_SUSPEND_POSSIBLE=y 328CONFIG_ARCH_SUSPEND_POSSIBLE=y
328CONFIG_NET=y 329CONFIG_NET=y
329 330
diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap3_defconfig
index 714835e5ebec..d6ad92177324 100644
--- a/arch/arm/configs/omap3_defconfig
+++ b/arch/arm/configs/omap3_defconfig
@@ -450,7 +450,7 @@ CONFIG_SUSPEND=y
450# CONFIG_PM_TEST_SUSPEND is not set 450# CONFIG_PM_TEST_SUSPEND is not set
451CONFIG_SUSPEND_FREEZER=y 451CONFIG_SUSPEND_FREEZER=y
452# CONFIG_APM_EMULATION is not set 452# CONFIG_APM_EMULATION is not set
453# CONFIG_PM_RUNTIME is not set 453CONFIG_PM_RUNTIME=y
454CONFIG_ARCH_SUSPEND_POSSIBLE=y 454CONFIG_ARCH_SUSPEND_POSSIBLE=y
455CONFIG_NET=y 455CONFIG_NET=y
456 456
diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig
index e2ad859fbec6..a6dd6d1af806 100644
--- a/arch/arm/configs/omap3_evm_defconfig
+++ b/arch/arm/configs/omap3_evm_defconfig
@@ -340,6 +340,7 @@ CONFIG_PM_SLEEP=y
340CONFIG_SUSPEND=y 340CONFIG_SUSPEND=y
341CONFIG_SUSPEND_FREEZER=y 341CONFIG_SUSPEND_FREEZER=y
342# CONFIG_APM_EMULATION is not set 342# CONFIG_APM_EMULATION is not set
343CONFIG_PM_RUNTIME=y
343CONFIG_ARCH_SUSPEND_POSSIBLE=y 344CONFIG_ARCH_SUSPEND_POSSIBLE=y
344CONFIG_NET=y 345CONFIG_NET=y
345 346
diff --git a/arch/arm/configs/omap3_touchbook_defconfig b/arch/arm/configs/omap3_touchbook_defconfig
index 74fe6be9c5ec..968fbaa8f04d 100644
--- a/arch/arm/configs/omap3_touchbook_defconfig
+++ b/arch/arm/configs/omap3_touchbook_defconfig
@@ -368,7 +368,7 @@ CONFIG_SUSPEND=y
368# CONFIG_PM_TEST_SUSPEND is not set 368# CONFIG_PM_TEST_SUSPEND is not set
369CONFIG_SUSPEND_FREEZER=y 369CONFIG_SUSPEND_FREEZER=y
370# CONFIG_APM_EMULATION is not set 370# CONFIG_APM_EMULATION is not set
371# CONFIG_PM_RUNTIME is not set 371CONFIG_PM_RUNTIME=y
372CONFIG_ARCH_SUSPEND_POSSIBLE=y 372CONFIG_ARCH_SUSPEND_POSSIBLE=y
373CONFIG_NET=y 373CONFIG_NET=y
374 374
diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig
index bb2917e5cb47..ddde429a7d9b 100644
--- a/arch/arm/configs/omap_3430sdp_defconfig
+++ b/arch/arm/configs/omap_3430sdp_defconfig
@@ -363,6 +363,7 @@ CONFIG_PM_SLEEP=y
363CONFIG_SUSPEND=y 363CONFIG_SUSPEND=y
364CONFIG_SUSPEND_FREEZER=y 364CONFIG_SUSPEND_FREEZER=y
365# CONFIG_APM_EMULATION is not set 365# CONFIG_APM_EMULATION is not set
366CONFIG_PM_RUNTIME=y
366CONFIG_ARCH_SUSPEND_POSSIBLE=y 367CONFIG_ARCH_SUSPEND_POSSIBLE=y
367CONFIG_NET=y 368CONFIG_NET=y
368 369
diff --git a/arch/arm/configs/omap_3630sdp_defconfig b/arch/arm/configs/omap_3630sdp_defconfig
index d25c3d4424ca..609f348b1055 100644
--- a/arch/arm/configs/omap_3630sdp_defconfig
+++ b/arch/arm/configs/omap_3630sdp_defconfig
@@ -361,7 +361,7 @@ CONFIG_SUSPEND=y
361# CONFIG_PM_TEST_SUSPEND is not set 361# CONFIG_PM_TEST_SUSPEND is not set
362CONFIG_SUSPEND_FREEZER=y 362CONFIG_SUSPEND_FREEZER=y
363# CONFIG_APM_EMULATION is not set 363# CONFIG_APM_EMULATION is not set
364# CONFIG_PM_RUNTIME is not set 364CONFIG_PM_RUNTIME=y
365CONFIG_ARCH_SUSPEND_POSSIBLE=y 365CONFIG_ARCH_SUSPEND_POSSIBLE=y
366CONFIG_NET=y 366CONFIG_NET=y
367 367
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
index 523189586a4b..91ef2ed0f80a 100644
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ b/arch/arm/configs/omap_h2_1610_defconfig
@@ -331,6 +331,7 @@ CONFIG_PM_SLEEP=y
331CONFIG_SUSPEND=y 331CONFIG_SUSPEND=y
332CONFIG_SUSPEND_FREEZER=y 332CONFIG_SUSPEND_FREEZER=y
333# CONFIG_APM_EMULATION is not set 333# CONFIG_APM_EMULATION is not set
334CONFIG_PM_RUNTIME=y
334CONFIG_ARCH_SUSPEND_POSSIBLE=y 335CONFIG_ARCH_SUSPEND_POSSIBLE=y
335 336
336# 337#
diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig
index a82e81332a03..f5c6e11cf189 100644
--- a/arch/arm/configs/omap_zoom2_defconfig
+++ b/arch/arm/configs/omap_zoom2_defconfig
@@ -343,6 +343,7 @@ CONFIG_SUSPEND=y
343# CONFIG_PM_TEST_SUSPEND is not set 343# CONFIG_PM_TEST_SUSPEND is not set
344CONFIG_SUSPEND_FREEZER=y 344CONFIG_SUSPEND_FREEZER=y
345# CONFIG_APM_EMULATION is not set 345# CONFIG_APM_EMULATION is not set
346CONFIG_PM_RUNTIME=y
346CONFIG_ARCH_SUSPEND_POSSIBLE=y 347CONFIG_ARCH_SUSPEND_POSSIBLE=y
347CONFIG_NET=y 348CONFIG_NET=y
348 349
diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig
index ff8ac3dcc31d..ea9a5012d332 100644
--- a/arch/arm/configs/omap_zoom3_defconfig
+++ b/arch/arm/configs/omap_zoom3_defconfig
@@ -361,7 +361,7 @@ CONFIG_SUSPEND=y
361# CONFIG_PM_TEST_SUSPEND is not set 361# CONFIG_PM_TEST_SUSPEND is not set
362CONFIG_SUSPEND_FREEZER=y 362CONFIG_SUSPEND_FREEZER=y
363# CONFIG_APM_EMULATION is not set 363# CONFIG_APM_EMULATION is not set
364# CONFIG_PM_RUNTIME is not set 364CONFIG_PM_RUNTIME=y
365CONFIG_ARCH_SUSPEND_POSSIBLE=y 365CONFIG_ARCH_SUSPEND_POSSIBLE=y
366CONFIG_NET=y 366CONFIG_NET=y
367 367
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 193bd334fbbf..45135ffadc57 100644
--- a/arch/arm/configs/rx51_defconfig
+++ b/arch/arm/configs/rx51_defconfig
@@ -322,6 +322,7 @@ CONFIG_PM_SLEEP=y
322CONFIG_SUSPEND=y 322CONFIG_SUSPEND=y
323CONFIG_SUSPEND_FREEZER=y 323CONFIG_SUSPEND_FREEZER=y
324# CONFIG_APM_EMULATION is not set 324# CONFIG_APM_EMULATION is not set
325CONFIG_PM_RUNTIME=y
325CONFIG_ARCH_SUSPEND_POSSIBLE=y 326CONFIG_ARCH_SUSPEND_POSSIBLE=y
326CONFIG_NET=y 327CONFIG_NET=y
327 328
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index a399bb5730f1..bff056489cc1 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -98,6 +98,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
98extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); 98extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
99#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) 99#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
100 100
101struct task_struct;
101int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); 102int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
102#define ELF_CORE_COPY_TASK_REGS dump_task_regs 103#define ELF_CORE_COPY_TASK_REGS dump_task_regs
103 104
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 013cfcdc4839..ffc0e85775b4 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; }
67 */ 67 */
68#define pgprot_noncached(prot) __pgprot(0) 68#define pgprot_noncached(prot) __pgprot(0)
69#define pgprot_writecombine(prot) __pgprot(0) 69#define pgprot_writecombine(prot) __pgprot(0)
70#define pgprot_dmacoherent(prot) __pgprot(0)
70 71
71 72
72/* 73/*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 7e9ed1eea40a..d93f976fb389 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -102,6 +102,8 @@
102 .else 102 .else
103 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr 103 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
104 .endif 104 .endif
105 mov r0, r0 @ ARMv5T and earlier require a nop
106 @ after ldm {}^
105 add sp, sp, #S_FRAME_SIZE - S_PC 107 add sp, sp, #S_FRAME_SIZE - S_PC
106 movs pc, lr @ return & move spsr_svc into cpsr 108 movs pc, lr @ return & move spsr_svc into cpsr
107 .endm 109 .endm
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index ba8ccfede964..a5b846b9895d 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -9,6 +9,7 @@
9 * Authors: George Davis <davis_g@mvista.com> 9 * Authors: George Davis <davis_g@mvista.com>
10 * Deepak Saxena <dsaxena@plexity.net> 10 * Deepak Saxena <dsaxena@plexity.net>
11 */ 11 */
12#include <linux/irq.h>
12#include <linux/kgdb.h> 13#include <linux/kgdb.h>
13#include <asm/traps.h> 14#include <asm/traps.h>
14 15
@@ -158,6 +159,18 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
158 .fn = kgdb_compiled_brk_fn 159 .fn = kgdb_compiled_brk_fn
159}; 160};
160 161
162static void kgdb_call_nmi_hook(void *ignored)
163{
164 kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
165}
166
167void kgdb_roundup_cpus(unsigned long flags)
168{
169 local_irq_enable();
170 smp_call_function(kgdb_call_nmi_hook, NULL, 0);
171 local_irq_disable();
172}
173
161/** 174/**
162 * kgdb_arch_init - Perform any architecture specific initalization. 175 * kgdb_arch_init - Perform any architecture specific initalization.
163 * 176 *
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 3875d99cc40f..9e70f2053f9a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -332,7 +332,8 @@ armpmu_reserve_hardware(void)
332 332
333 for (i = 0; i < pmu_irqs->num_irqs; ++i) { 333 for (i = 0; i < pmu_irqs->num_irqs; ++i) {
334 err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq, 334 err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
335 IRQF_DISABLED, "armpmu", NULL); 335 IRQF_DISABLED | IRQF_NOBALANCING,
336 "armpmu", NULL);
336 if (err) { 337 if (err) {
337 pr_warning("unable to request IRQ%d for ARM " 338 pr_warning("unable to request IRQ%d for ARM "
338 "perf counters\n", pmu_irqs->irqs[i]); 339 "perf counters\n", pmu_irqs->irqs[i]);
@@ -1624,7 +1625,7 @@ enum armv7_counters {
1624/* 1625/*
1625 * EVTSEL: Event selection reg 1626 * EVTSEL: Event selection reg
1626 */ 1627 */
1627#define ARMV7_EVTSEL_MASK 0x7f /* Mask for writable bits */ 1628#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
1628 1629
1629/* 1630/*
1630 * SELECT: Counter selection reg 1631 * SELECT: Counter selection reg
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 57162af53dc9..577543f3857f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -99,6 +99,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
99 *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | 99 *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
100 PMD_TYPE_SECT | PMD_SECT_AP_WRITE); 100 PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
101 flush_pmd_entry(pmd); 101 flush_pmd_entry(pmd);
102 outer_clean_range(__pa(pmd), __pa(pmd + 1));
102 103
103 /* 104 /*
104 * We need to tell the secondary core where to find 105 * We need to tell the secondary core where to find
@@ -106,7 +107,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
106 */ 107 */
107 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 108 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
108 secondary_data.pgdir = virt_to_phys(pgd); 109 secondary_data.pgdir = virt_to_phys(pgd);
109 wmb(); 110 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
111 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
110 112
111 /* 113 /*
112 * Now bring the CPU into our world. 114 * Now bring the CPU into our world.
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 29cf83177484..c11fd47aec5d 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -271,10 +271,12 @@ static void __init ek_add_device_buttons(void) {}
271 271
272 272
273static struct i2c_board_info __initdata ek_i2c_devices[] = { 273static struct i2c_board_info __initdata ek_i2c_devices[] = {
274 { 274 {
275 I2C_BOARD_INFO("24c512", 0x50), 275 I2C_BOARD_INFO("24c512", 0x50)
276 I2C_BOARD_INFO("wm8731", 0x1b), 276 },
277 }, 277 {
278 I2C_BOARD_INFO("wm8731", 0x1b)
279 },
278}; 280};
279 281
280 282
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 2069fb33baaa..4b9fc57770db 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -22,6 +22,9 @@ obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
22# SMP support ONLY available for OMAP4 22# SMP support ONLY available for OMAP4
23obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o 23obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
24obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o 24obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
25obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o
26
27AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a
25 28
26# Functions loaded to SRAM 29# Functions loaded to SRAM
27obj-$(CONFIG_ARCH_OMAP2420) += sram242x.o 30obj-$(CONFIG_ARCH_OMAP2420) += sram242x.o
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index a101029ceb6f..5822bcf7b15f 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -648,7 +648,7 @@ static void enable_board_wakeup_source(void)
648 OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); 648 OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
649} 649}
650 650
651static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 651static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
652 652
653 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 653 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
654 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 654 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 4386d2b4a785..a0a2a113465c 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -54,7 +54,7 @@ static void enable_board_wakeup_source(void)
54 OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); 54 OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
55} 55}
56 56
57static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 57static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
58 58
59 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 59 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
60 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 60 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 180ac112e527..b88f28c5814b 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -50,33 +50,9 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = {
50}; 50};
51 51
52#ifdef CONFIG_CACHE_L2X0 52#ifdef CONFIG_CACHE_L2X0
53noinline void omap_smc1(u32 fn, u32 arg)
54{
55 register u32 r12 asm("r12") = fn;
56 register u32 r0 asm("r0") = arg;
57
58 /* This is common routine cache secure monitor API used to
59 * modify the PL310 secure registers.
60 * r0 contains the value to be modified and "r12" contains
61 * the monitor API number. It uses few CPU registers
62 * internally and hence they need be backed up including
63 * link register "lr".
64 * Explicitly save r11 and r12 the compiler generated code
65 * won't save it.
66 */
67 asm volatile(
68 "stmfd r13!, {r11,r12}\n"
69 "dsb\n"
70 "smc\n"
71 "ldmfd r13!, {r11,r12}\n"
72 : "+r" (r0), "+r" (r12)
73 :
74 : "r4", "r5", "r10", "lr", "cc");
75}
76EXPORT_SYMBOL(omap_smc1);
77
78static int __init omap_l2_cache_init(void) 53static int __init omap_l2_cache_init(void)
79{ 54{
55 extern void omap_smc1(u32 fn, u32 arg);
80 void __iomem *l2cache_base; 56 void __iomem *l2cache_base;
81 57
82 /* To avoid code running on other OMAPs in 58 /* To avoid code running on other OMAPs in
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 70c18614773c..6ae880585d54 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -273,7 +273,7 @@ static void __init am3517_evm_init_irq(void)
273 omap_gpio_init(); 273 omap_gpio_init();
274} 274}
275 275
276static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { 276static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
277 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 277 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
278 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 278 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
279 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, 279 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index afa77caaff4d..2de4f79f03a0 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -612,7 +612,7 @@ static struct omap2_hsmmc_info mmc[] = {
612 {} /* Terminator */ 612 {} /* Terminator */
613}; 613};
614 614
615static struct ehci_hcd_omap_platform_data ehci_pdata = { 615static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
616 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 616 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
617 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 617 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
618 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, 618 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 371019054b49..5bfc13b3176c 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -636,7 +636,7 @@ static struct omap_musb_board_data musb_board_data = {
636 .power = 100, 636 .power = 100,
637}; 637};
638 638
639static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 639static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
640 640
641 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 641 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
642 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 642 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 9958987a3d0a..3c7789d45051 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -16,7 +16,6 @@
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/leds.h>
20#include <linux/interrupt.h> 19#include <linux/interrupt.h>
21 20
22#include <linux/regulator/machine.h> 21#include <linux/regulator/machine.h>
@@ -39,8 +38,8 @@
39#define IGEP2_SMSC911X_CS 5 38#define IGEP2_SMSC911X_CS 5
40#define IGEP2_SMSC911X_GPIO 176 39#define IGEP2_SMSC911X_GPIO 176
41#define IGEP2_GPIO_USBH_NRESET 24 40#define IGEP2_GPIO_USBH_NRESET 24
42#define IGEP2_GPIO_LED0_RED 26 41#define IGEP2_GPIO_LED0_GREEN 26
43#define IGEP2_GPIO_LED0_GREEN 27 42#define IGEP2_GPIO_LED0_RED 27
44#define IGEP2_GPIO_LED1_RED 28 43#define IGEP2_GPIO_LED1_RED 28
45#define IGEP2_GPIO_DVI_PUP 170 44#define IGEP2_GPIO_DVI_PUP 170
46#define IGEP2_GPIO_WIFI_NPD 94 45#define IGEP2_GPIO_WIFI_NPD 94
@@ -355,34 +354,50 @@ static void __init igep2_display_init(void)
355 gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1)) 354 gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1))
356 pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); 355 pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n");
357} 356}
358#ifdef CONFIG_LEDS_TRIGGERS 357
359static struct gpio_led gpio_leds[] = { 358#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
359#include <linux/leds.h>
360
361static struct gpio_led igep2_gpio_leds[] = {
360 { 362 {
361 .name = "GPIO_LED1_RED", 363 .name = "led0:red",
364 .gpio = IGEP2_GPIO_LED0_RED,
365 },
366 {
367 .name = "led0:green",
362 .default_trigger = "heartbeat", 368 .default_trigger = "heartbeat",
369 .gpio = IGEP2_GPIO_LED0_GREEN,
370 },
371 {
372 .name = "led1:red",
363 .gpio = IGEP2_GPIO_LED1_RED, 373 .gpio = IGEP2_GPIO_LED1_RED,
364 }, 374 },
365}; 375};
366 376
367static struct gpio_led_platform_data gpio_leds_info = { 377static struct gpio_led_platform_data igep2_led_pdata = {
368 .leds = gpio_leds, 378 .leds = igep2_gpio_leds,
369 .num_leds = ARRAY_SIZE(gpio_leds), 379 .num_leds = ARRAY_SIZE(igep2_gpio_leds),
370}; 380};
371 381
372static struct platform_device leds_gpio = { 382static struct platform_device igep2_led_device = {
373 .name = "leds-gpio", 383 .name = "leds-gpio",
374 .id = -1, 384 .id = -1,
375 .dev = { 385 .dev = {
376 .platform_data = &gpio_leds_info, 386 .platform_data = &igep2_led_pdata,
377 }, 387 },
378}; 388};
389
390static void __init igep2_init_led(void)
391{
392 platform_device_register(&igep2_led_device);
393}
394
395#else
396static inline void igep2_init_led(void) {}
379#endif 397#endif
380 398
381static struct platform_device *igep2_devices[] __initdata = { 399static struct platform_device *igep2_devices[] __initdata = {
382 &igep2_dss_device, 400 &igep2_dss_device,
383#ifdef CONFIG_LEDS_TRIGGERS
384 &leds_gpio,
385#endif
386}; 401};
387 402
388static void __init igep2_init_irq(void) 403static void __init igep2_init_irq(void)
@@ -442,7 +457,7 @@ static struct omap_musb_board_data musb_board_data = {
442 .power = 100, 457 .power = 100,
443}; 458};
444 459
445static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 460static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
446 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, 461 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
447 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 462 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
448 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, 463 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
@@ -471,31 +486,34 @@ static void __init igep2_init(void)
471 usb_ehci_init(&ehci_pdata); 486 usb_ehci_init(&ehci_pdata);
472 487
473 igep2_flash_init(); 488 igep2_flash_init();
489 igep2_init_led();
474 igep2_display_init(); 490 igep2_display_init();
475 igep2_init_smsc911x(); 491 igep2_init_smsc911x();
476 492
477 /* GPIO userspace leds */ 493 /* GPIO userspace leds */
478 if ((gpio_request(IGEP2_GPIO_LED0_RED, "GPIO_LED0_RED") == 0) && 494#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
495 if ((gpio_request(IGEP2_GPIO_LED0_RED, "led0:red") == 0) &&
479 (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) { 496 (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) {
480 gpio_export(IGEP2_GPIO_LED0_RED, 0); 497 gpio_export(IGEP2_GPIO_LED0_RED, 0);
481 gpio_set_value(IGEP2_GPIO_LED0_RED, 0); 498 gpio_set_value(IGEP2_GPIO_LED0_RED, 0);
482 } else 499 } else
483 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n"); 500 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
484 501
485 if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "GPIO_LED0_GREEN") == 0) && 502 if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "led0:green") == 0) &&
486 (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) { 503 (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) {
487 gpio_export(IGEP2_GPIO_LED0_GREEN, 0); 504 gpio_export(IGEP2_GPIO_LED0_GREEN, 0);
488 gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0); 505 gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0);
489 } else 506 } else
490 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n"); 507 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n");
491#ifndef CONFIG_LEDS_TRIGGERS 508
492 if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_LED1_RED") == 0) && 509 if ((gpio_request(IGEP2_GPIO_LED1_RED, "led1:red") == 0) &&
493 (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) { 510 (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) {
494 gpio_export(IGEP2_GPIO_LED1_RED, 0); 511 gpio_export(IGEP2_GPIO_LED1_RED, 0);
495 gpio_set_value(IGEP2_GPIO_LED1_RED, 0); 512 gpio_set_value(IGEP2_GPIO_LED1_RED, 0);
496 } else 513 } else
497 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n"); 514 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
498#endif 515#endif
516
499 /* GPIO W-LAN + Bluetooth combo module */ 517 /* GPIO W-LAN + Bluetooth combo module */
500 if ((gpio_request(IGEP2_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) && 518 if ((gpio_request(IGEP2_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) &&
501 (gpio_direction_output(IGEP2_GPIO_WIFI_NPD, 1) == 0)) { 519 (gpio_direction_output(IGEP2_GPIO_WIFI_NPD, 1) == 0)) {
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 4cab0522d7ce..da9bcb898991 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -37,6 +37,103 @@ static int slot1_cover_open;
37static int slot2_cover_open; 37static int slot2_cover_open;
38static struct device *mmc_device; 38static struct device *mmc_device;
39 39
40#define TUSB6010_ASYNC_CS 1
41#define TUSB6010_SYNC_CS 4
42#define TUSB6010_GPIO_INT 58
43#define TUSB6010_GPIO_ENABLE 0
44#define TUSB6010_DMACHAN 0x3f
45
46#if defined(CONFIG_USB_TUSB6010) || \
47 defined(CONFIG_USB_TUSB6010_MODULE)
48/*
49 * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
50 * 1.5 V voltage regulators of PM companion chip. Companion chip will then
51 * provide then PGOOD signal to TUSB6010 which will release it from reset.
52 */
53static int tusb_set_power(int state)
54{
55 int i, retval = 0;
56
57 if (state) {
58 gpio_set_value(TUSB6010_GPIO_ENABLE, 1);
59 msleep(1);
60
61 /* Wait until TUSB6010 pulls INT pin down */
62 i = 100;
63 while (i && gpio_get_value(TUSB6010_GPIO_INT)) {
64 msleep(1);
65 i--;
66 }
67
68 if (!i) {
69 printk(KERN_ERR "tusb: powerup failed\n");
70 retval = -ENODEV;
71 }
72 } else {
73 gpio_set_value(TUSB6010_GPIO_ENABLE, 0);
74 msleep(10);
75 }
76
77 return retval;
78}
79
80static struct musb_hdrc_config musb_config = {
81 .multipoint = 1,
82 .dyn_fifo = 1,
83 .num_eps = 16,
84 .ram_bits = 12,
85};
86
87static struct musb_hdrc_platform_data tusb_data = {
88#if defined(CONFIG_USB_MUSB_OTG)
89 .mode = MUSB_OTG,
90#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
91 .mode = MUSB_PERIPHERAL,
92#else /* defined(CONFIG_USB_MUSB_HOST) */
93 .mode = MUSB_HOST,
94#endif
95 .set_power = tusb_set_power,
96 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
97 .power = 100, /* Max 100 mA VBUS for host mode */
98 .config = &musb_config,
99};
100
101static void __init n8x0_usb_init(void)
102{
103 int ret = 0;
104 static char announce[] __initdata = KERN_INFO "TUSB 6010\n";
105
106 /* PM companion chip power control pin */
107 ret = gpio_request(TUSB6010_GPIO_ENABLE, "TUSB6010 enable");
108 if (ret != 0) {
109 printk(KERN_ERR "Could not get TUSB power GPIO%i\n",
110 TUSB6010_GPIO_ENABLE);
111 return;
112 }
113 gpio_direction_output(TUSB6010_GPIO_ENABLE, 0);
114
115 tusb_set_power(0);
116
117 ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2,
118 TUSB6010_ASYNC_CS, TUSB6010_SYNC_CS,
119 TUSB6010_GPIO_INT, TUSB6010_DMACHAN);
120 if (ret != 0)
121 goto err;
122
123 printk(announce);
124
125 return;
126
127err:
128 gpio_free(TUSB6010_GPIO_ENABLE);
129}
130#else
131
132static void __init n8x0_usb_init(void) {}
133
134#endif /*CONFIG_USB_TUSB6010 */
135
136
40static struct omap2_mcspi_device_config p54spi_mcspi_config = { 137static struct omap2_mcspi_device_config p54spi_mcspi_config = {
41 .turbo_mode = 0, 138 .turbo_mode = 0,
42 .single_channel = 1, 139 .single_channel = 1,
@@ -562,6 +659,7 @@ static void __init n8x0_init_machine(void)
562 n8x0_menelaus_init(); 659 n8x0_menelaus_init();
563 n8x0_onenand_init(); 660 n8x0_onenand_init();
564 n8x0_mmc_init(); 661 n8x0_mmc_init();
662 n8x0_usb_init();
565} 663}
566 664
567MACHINE_START(NOKIA_N800, "Nokia N800") 665MACHINE_START(NOKIA_N800, "Nokia N800")
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 6eb77e1f7c82..962d377970e9 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -410,7 +410,7 @@ static void __init omap3beagle_flash_init(void)
410 } 410 }
411} 411}
412 412
413static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 413static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
414 414
415 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 415 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
416 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 416 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index d6bc88c426b5..017bb2f4f7d2 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -635,7 +635,7 @@ static struct platform_device *omap3_evm_devices[] __initdata = {
635 &omap3_evm_dss_device, 635 &omap3_evm_dss_device,
636}; 636};
637 637
638static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 638static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
639 639
640 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, 640 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
641 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 641 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 4827f4658df3..395d049bf010 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -459,12 +459,20 @@ static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = {
459 }, 459 },
460}; 460};
461 461
462static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
463 {
464 I2C_BOARD_INFO("bq27500", 0x55),
465 .flags = I2C_CLIENT_WAKE,
466 },
467};
468
462static int __init omap3pandora_i2c_init(void) 469static int __init omap3pandora_i2c_init(void)
463{ 470{
464 omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo, 471 omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo,
465 ARRAY_SIZE(omap3pandora_i2c_boardinfo)); 472 ARRAY_SIZE(omap3pandora_i2c_boardinfo));
466 /* i2c2 pins are not connected */ 473 /* i2c2 pins are not connected */
467 omap_register_i2c_bus(3, 100, NULL, 0); 474 omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
475 ARRAY_SIZE(omap3pandora_i2c3_boardinfo));
468 return 0; 476 return 0;
469} 477}
470 478
@@ -537,7 +545,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
537 &pandora_dss_device, 545 &pandora_dss_device,
538}; 546};
539 547
540static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 548static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
541 549
542 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 550 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
543 .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, 551 .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 3943d0f8322c..2504d41f923e 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -493,7 +493,7 @@ static void __init omap3touchbook_flash_init(void)
493 } 493 }
494} 494}
495 495
496static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 496static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
497 497
498 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, 498 .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
499 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 499 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
@@ -518,14 +518,14 @@ static void omap3_touchbook_poweroff(void)
518 gpio_direction_output(TB_KILL_POWER_GPIO, 0); 518 gpio_direction_output(TB_KILL_POWER_GPIO, 0);
519} 519}
520 520
521static void __init early_touchbook_revision(char **p) 521static int __init early_touchbook_revision(char *p)
522{ 522{
523 if (!*p) 523 if (!p)
524 return; 524 return 0;
525 525
526 strict_strtoul(*p, 10, &touchbook_revision); 526 return strict_strtoul(p, 10, &touchbook_revision);
527} 527}
528__early_param("tbr=", early_touchbook_revision); 528early_param("tbr", early_touchbook_revision);
529 529
530static struct omap_musb_board_data musb_board_data = { 530static struct omap_musb_board_data musb_board_data = {
531 .interface_type = MUSB_INTERFACE_ULPI, 531 .interface_type = MUSB_INTERFACE_ULPI,
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 50872a42bec7..8848c7c5ce48 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -394,7 +394,7 @@ static struct platform_device *overo_devices[] __initdata = {
394 &overo_lcd_device, 394 &overo_lcd_device,
395}; 395};
396 396
397static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 397static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
398 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, 398 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
399 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 399 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
400 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, 400 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index d3e3cd5170d1..cd3e40cf3ac1 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -52,7 +52,7 @@ static struct omap_board_mux board_mux[] __initdata = {
52#define board_mux NULL 52#define board_mux NULL
53#endif 53#endif
54 54
55static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { 55static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
56 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, 56 .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
57 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, 57 .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
58 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, 58 .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index f12af95ead45..d932b142d0b6 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1841,6 +1841,7 @@ static struct omap_clk omap2420_clks[] = {
1841 CLK(NULL, "aes_ick", &aes_ick, CK_242X), 1841 CLK(NULL, "aes_ick", &aes_ick, CK_242X),
1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X), 1842 CLK(NULL, "pka_ick", &pka_ick, CK_242X),
1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X), 1843 CLK(NULL, "usb_fck", &usb_fck, CK_242X),
1844 CLK("musb_hdrc", "fck", &osc_ck, CK_242X),
1844}; 1845};
1845 1846
1846/* 1847/*
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 402e8f0d0f21..87f676acf61d 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -237,7 +237,7 @@ static void __init _omap2_map_common_io(void)
237} 237}
238 238
239#ifdef CONFIG_ARCH_OMAP2420 239#ifdef CONFIG_ARCH_OMAP2420
240void __init omap242x_map_common_io() 240void __init omap242x_map_common_io(void)
241{ 241{
242 iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc)); 242 iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
243 iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc)); 243 iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc));
@@ -246,7 +246,7 @@ void __init omap242x_map_common_io()
246#endif 246#endif
247 247
248#ifdef CONFIG_ARCH_OMAP2430 248#ifdef CONFIG_ARCH_OMAP2430
249void __init omap243x_map_common_io() 249void __init omap243x_map_common_io(void)
250{ 250{
251 iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc)); 251 iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
252 iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc)); 252 iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc));
@@ -255,7 +255,7 @@ void __init omap243x_map_common_io()
255#endif 255#endif
256 256
257#ifdef CONFIG_ARCH_OMAP3 257#ifdef CONFIG_ARCH_OMAP3
258void __init omap34xx_map_common_io() 258void __init omap34xx_map_common_io(void)
259{ 259{
260 iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc)); 260 iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc));
261 _omap2_map_common_io(); 261 _omap2_map_common_io();
@@ -263,7 +263,7 @@ void __init omap34xx_map_common_io()
263#endif 263#endif
264 264
265#ifdef CONFIG_ARCH_OMAP4 265#ifdef CONFIG_ARCH_OMAP4
266void __init omap44xx_map_common_io() 266void __init omap44xx_map_common_io(void)
267{ 267{
268 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); 268 iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
269 _omap2_map_common_io(); 269 _omap2_map_common_io();
@@ -309,7 +309,6 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
309{ 309{
310 pwrdm_init(powerdomains_omap); 310 pwrdm_init(powerdomains_omap);
311 clkdm_init(clockdomains_omap, clkdm_autodeps); 311 clkdm_init(clockdomains_omap, clkdm_autodeps);
312#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
313 if (cpu_is_omap242x()) 312 if (cpu_is_omap242x())
314 omap2420_hwmod_init(); 313 omap2420_hwmod_init();
315 else if (cpu_is_omap243x()) 314 else if (cpu_is_omap243x())
@@ -319,7 +318,6 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
319 omap2_mux_init(); 318 omap2_mux_init();
320 /* The OPP tables have to be registered before a clk init */ 319 /* The OPP tables have to be registered before a clk init */
321 omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps); 320 omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
322#endif
323 321
324 if (cpu_is_omap2420()) 322 if (cpu_is_omap2420())
325 omap2420_clk_init(); 323 omap2420_clk_init();
@@ -333,11 +331,12 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
333 pr_err("Could not init clock framework - unknown CPU\n"); 331 pr_err("Could not init clock framework - unknown CPU\n");
334 332
335 omap_serial_early_init(); 333 omap_serial_early_init();
336#ifndef CONFIG_ARCH_OMAP4 334 if (cpu_is_omap24xx() || cpu_is_omap34xx()) /* FIXME: OMAP4 */
337 omap_hwmod_late_init(); 335 omap_hwmod_late_init();
338 omap_pm_if_init(); 336 omap_pm_if_init();
339 omap2_sdrc_init(sdrc_cs0, sdrc_cs1); 337 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
340 _omap2_init_reprogram_sdrc(); 338 omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
341#endif 339 _omap2_init_reprogram_sdrc();
340 }
342 gpmc_init(); 341 gpmc_init();
343} 342}
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 52a981cb8fdd..318f3638653c 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -430,19 +430,19 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev)
430 if (unlikely(!res)) { 430 if (unlikely(!res)) {
431 dev_err(&pdev->dev, "invalid irq resource\n"); 431 dev_err(&pdev->dev, "invalid irq resource\n");
432 ret = -ENODEV; 432 ret = -ENODEV;
433 goto err_iva1; 433 omap_mbox_unregister(&mbox_dsp_info);
434 goto err_dsp;
434 } 435 }
435 mbox_iva_info.irq = res->start; 436 mbox_iva_info.irq = res->start;
436 ret = omap_mbox_register(&pdev->dev, &mbox_iva_info); 437 ret = omap_mbox_register(&pdev->dev, &mbox_iva_info);
437 if (ret) 438 if (ret) {
438 goto err_iva1; 439 omap_mbox_unregister(&mbox_dsp_info);
440 goto err_dsp;
441 }
439 } 442 }
440#endif 443#endif
441 return 0; 444 return 0;
442 445
443err_iva1:
444 omap_mbox_unregister(&mbox_dsp_info);
445
446err_dsp: 446err_dsp:
447 iounmap(mbox_base); 447 iounmap(mbox_base);
448 return ret; 448 return ret;
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
new file mode 100644
index 000000000000..89bb2b141473
--- /dev/null
+++ b/arch/arm/mach-omap2/omap44xx-smc.S
@@ -0,0 +1,32 @@
1/*
2 * OMAP44xx secure APIs file.
3 *
4 * Copyright (C) 2010 Texas Instruments, Inc.
5 * Written by Santosh Shilimkar <santosh.shilimkar@ti.com>
6 *
7 *
8 * This program is free software,you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14
15/*
16 * This is common routine to manage secure monitor API
17 * used to modify the PL310 secure registers.
18 * 'r0' contains the value to be modified and 'r12' contains
19 * the monitor API number. It uses few CPU registers
20 * internally and hence they need be backed up including
21 * link register "lr".
22 * Function signature : void omap_smc1(u32 fn, u32 arg)
23 */
24
25ENTRY(omap_smc1)
26 stmfd sp!, {r2-r12, lr}
27 mov r12, r0
28 mov r0, r1
29 dsb
30 smc
31 ldmfd sp!, {r2-r12, pc}
32END(omap_smc1)
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 81872aacb801..9537f6f2352d 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -133,7 +133,7 @@ u32 omap_prcm_get_reset_sources(void)
133EXPORT_SYMBOL(omap_prcm_get_reset_sources); 133EXPORT_SYMBOL(omap_prcm_get_reset_sources);
134 134
135/* Resets clock rates and reboots the system. Only called from system.h */ 135/* Resets clock rates and reboots the system. Only called from system.h */
136void omap_prcm_arch_reset(char mode) 136void omap_prcm_arch_reset(char mode, const char *cmd)
137{ 137{
138 s16 prcm_offs = 0; 138 s16 prcm_offs = 0;
139 139
@@ -145,7 +145,7 @@ void omap_prcm_arch_reset(char mode)
145 u32 l; 145 u32 l;
146 146
147 prcm_offs = OMAP3430_GR_MOD; 147 prcm_offs = OMAP3430_GR_MOD;
148 l = ('B' << 24) | ('M' << 16) | mode; 148 l = ('B' << 24) | ('M' << 16) | (cmd ? (u8)*cmd : 0);
149 /* Reserve the first word in scratchpad for communicating 149 /* Reserve the first word in scratchpad for communicating
150 * with the boot ROM. A pointer to a data structure 150 * with the boot ROM. A pointer to a data structure
151 * describing the boot process can be stored there, 151 * describing the boot process can be stored there,
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index b79bc8926cc9..da77930480e9 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -644,16 +644,21 @@ static void serial_out_override(struct uart_port *up, int offset, int value)
644} 644}
645void __init omap_serial_early_init(void) 645void __init omap_serial_early_init(void)
646{ 646{
647 int i; 647 int i, nr_ports;
648 char name[16]; 648 char name[16];
649 649
650 if (!(cpu_is_omap3630() || cpu_is_omap4430()))
651 nr_ports = 3;
652 else
653 nr_ports = ARRAY_SIZE(omap_uart);
654
650 /* 655 /*
651 * Make sure the serial ports are muxed on at this point. 656 * Make sure the serial ports are muxed on at this point.
652 * You have to mux them off in device drivers later on 657 * You have to mux them off in device drivers later on
653 * if not needed. 658 * if not needed.
654 */ 659 */
655 660
656 for (i = 0; i < ARRAY_SIZE(omap_uart); i++) { 661 for (i = 0; i < nr_ports; i++) {
657 struct omap_uart_state *uart = &omap_uart[i]; 662 struct omap_uart_state *uart = &omap_uart[i];
658 struct platform_device *pdev = &uart->pdev; 663 struct platform_device *pdev = &uart->pdev;
659 struct device *dev = &pdev->dev; 664 struct device *dev = &pdev->dev;
@@ -669,17 +674,17 @@ void __init omap_serial_early_init(void)
669 continue; 674 continue;
670 } 675 }
671 676
672 sprintf(name, "uart%d_ick", i+1); 677 sprintf(name, "uart%d_ick", i + 1);
673 uart->ick = clk_get(NULL, name); 678 uart->ick = clk_get(NULL, name);
674 if (IS_ERR(uart->ick)) { 679 if (IS_ERR(uart->ick)) {
675 printk(KERN_ERR "Could not get uart%d_ick\n", i+1); 680 printk(KERN_ERR "Could not get uart%d_ick\n", i + 1);
676 uart->ick = NULL; 681 uart->ick = NULL;
677 } 682 }
678 683
679 sprintf(name, "uart%d_fck", i+1); 684 sprintf(name, "uart%d_fck", i+1);
680 uart->fck = clk_get(NULL, name); 685 uart->fck = clk_get(NULL, name);
681 if (IS_ERR(uart->fck)) { 686 if (IS_ERR(uart->fck)) {
682 printk(KERN_ERR "Could not get uart%d_fck\n", i+1); 687 printk(KERN_ERR "Could not get uart%d_fck\n", i + 1);
683 uart->fck = NULL; 688 uart->fck = NULL;
684 } 689 }
685 690
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index f1df873d59db..ee9f548d5d81 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -70,7 +70,7 @@ static struct platform_device ehci_device = {
70/* 70/*
71 * setup_ehci_io_mux - initialize IO pad mux for USBHOST 71 * setup_ehci_io_mux - initialize IO pad mux for USBHOST
72 */ 72 */
73static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode) 73static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
74{ 74{
75 switch (port_mode[0]) { 75 switch (port_mode[0]) {
76 case EHCI_HCD_OMAP_MODE_PHY: 76 case EHCI_HCD_OMAP_MODE_PHY:
@@ -213,7 +213,7 @@ static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode)
213 return; 213 return;
214} 214}
215 215
216void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata) 216void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
217{ 217{
218 platform_device_add_data(&ehci_device, pdata, sizeof(*pdata)); 218 platform_device_add_data(&ehci_device, pdata, sizeof(*pdata));
219 219
@@ -229,7 +229,7 @@ void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata)
229 229
230#else 230#else
231 231
232void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata) 232void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
233 233
234{ 234{
235} 235}
diff --git a/arch/arm/mach-rpc/include/mach/uncompress.h b/arch/arm/mach-rpc/include/mach/uncompress.h
index d5862368c4f2..8c9e2c7161c6 100644
--- a/arch/arm/mach-rpc/include/mach/uncompress.h
+++ b/arch/arm/mach-rpc/include/mach/uncompress.h
@@ -109,8 +109,6 @@ static inline void flush(void)
109{ 109{
110} 110}
111 111
112static void error(char *x);
113
114/* 112/*
115 * Setup for decompression 113 * Setup for decompression
116 */ 114 */
diff --git a/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
index 976002fb1b8f..976002fb1b8f 100644
--- a/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c
+++ b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c
diff --git a/arch/arm/mach-s3c64xx/include/mach/debug-macro.S b/arch/arm/mach-s3c64xx/include/mach/debug-macro.S
index b18ac5266dfc..f9ab5d26052a 100644
--- a/arch/arm/mach-s3c64xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-s3c64xx/include/mach/debug-macro.S
@@ -21,7 +21,7 @@
21 * aligned and add in the offset when we load the value here. 21 * aligned and add in the offset when we load the value here.
22 */ 22 */
23 23
24 .macro addruart, rx 24 .macro addruart, rx, rtmp
25 mrc p15, 0, \rx, c1, c0 25 mrc p15, 0, \rx, c1, c0
26 tst \rx, #1 26 tst \rx, #1
27 ldreq \rx, = S3C_PA_UART 27 ldreq \rx, = S3C_PA_UART
diff --git a/arch/arm/mach-s5p6440/include/mach/debug-macro.S b/arch/arm/mach-s5p6440/include/mach/debug-macro.S
index 48cdb0da026c..1347d7f99079 100644
--- a/arch/arm/mach-s5p6440/include/mach/debug-macro.S
+++ b/arch/arm/mach-s5p6440/include/mach/debug-macro.S
@@ -19,7 +19,7 @@
19 * aligned and add in the offset when we load the value here. 19 * aligned and add in the offset when we load the value here.
20 */ 20 */
21 21
22 .macro addruart, rx 22 .macro addruart, rx, rtmp
23 mrc p15, 0, \rx, c1, c0 23 mrc p15, 0, \rx, c1, c0
24 tst \rx, #1 24 tst \rx, #1
25 ldreq \rx, = S3C_PA_UART 25 ldreq \rx, = S3C_PA_UART
diff --git a/arch/arm/mach-s5p6442/include/mach/debug-macro.S b/arch/arm/mach-s5p6442/include/mach/debug-macro.S
index 1aae691e58ef..bb6536147ffb 100644
--- a/arch/arm/mach-s5p6442/include/mach/debug-macro.S
+++ b/arch/arm/mach-s5p6442/include/mach/debug-macro.S
@@ -15,7 +15,7 @@
15#include <mach/map.h> 15#include <mach/map.h>
16#include <plat/regs-serial.h> 16#include <plat/regs-serial.h>
17 17
18 .macro addruart, rx 18 .macro addruart, rx, rtmp
19 mrc p15, 0, \rx, c1, c0 19 mrc p15, 0, \rx, c1, c0
20 tst \rx, #1 20 tst \rx, #1
21 ldreq \rx, = S3C_PA_UART 21 ldreq \rx, = S3C_PA_UART
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index a0463d926447..1c2ec96ce261 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -206,10 +206,32 @@ static struct platform_device keysc_device = {
206 }, 206 },
207}; 207};
208 208
209/* SDHI0 */
210static struct resource sdhi0_resources[] = {
211 [0] = {
212 .name = "SDHI0",
213 .start = 0xe6850000,
214 .end = 0xe68501ff,
215 .flags = IORESOURCE_MEM,
216 },
217 [1] = {
218 .start = 96,
219 .flags = IORESOURCE_IRQ,
220 },
221};
222
223static struct platform_device sdhi0_device = {
224 .name = "sh_mobile_sdhi",
225 .num_resources = ARRAY_SIZE(sdhi0_resources),
226 .resource = sdhi0_resources,
227 .id = 0,
228};
229
209static struct platform_device *ap4evb_devices[] __initdata = { 230static struct platform_device *ap4evb_devices[] __initdata = {
210 &nor_flash_device, 231 &nor_flash_device,
211 &smc911x_device, 232 &smc911x_device,
212 &keysc_device, 233 &keysc_device,
234 &sdhi0_device,
213}; 235};
214 236
215static struct map_desc ap4evb_io_desc[] __initdata = { 237static struct map_desc ap4evb_io_desc[] __initdata = {
@@ -286,6 +308,16 @@ static void __init ap4evb_init(void)
286 gpio_request(GPIO_FN_KEYIN3_133, NULL); 308 gpio_request(GPIO_FN_KEYIN3_133, NULL);
287 gpio_request(GPIO_FN_KEYIN4, NULL); 309 gpio_request(GPIO_FN_KEYIN4, NULL);
288 310
311 /* SDHI0 */
312 gpio_request(GPIO_FN_SDHICD0, NULL);
313 gpio_request(GPIO_FN_SDHIWP0, NULL);
314 gpio_request(GPIO_FN_SDHICMD0, NULL);
315 gpio_request(GPIO_FN_SDHICLK0, NULL);
316 gpio_request(GPIO_FN_SDHID0_3, NULL);
317 gpio_request(GPIO_FN_SDHID0_2, NULL);
318 gpio_request(GPIO_FN_SDHID0_1, NULL);
319 gpio_request(GPIO_FN_SDHID0_0, NULL);
320
289 sh7372_add_standard_devices(); 321 sh7372_add_standard_devices();
290 322
291 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); 323 platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index f36c9a94d326..9247503296c4 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -26,9 +26,12 @@
26#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
27#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
28#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
29#include <linux/mtd/sh_flctl.h>
29#include <linux/usb/r8a66597.h> 30#include <linux/usb/r8a66597.h>
30#include <linux/io.h> 31#include <linux/io.h>
31#include <linux/gpio.h> 32#include <linux/gpio.h>
33#include <linux/input.h>
34#include <linux/input/sh_keysc.h>
32#include <mach/sh7367.h> 35#include <mach/sh7367.h>
33#include <mach/common.h> 36#include <mach/common.h>
34#include <asm/mach-types.h> 37#include <asm/mach-types.h>
@@ -127,9 +130,90 @@ static struct platform_device usb_host_device = {
127 .resource = usb_host_resources, 130 .resource = usb_host_resources,
128}; 131};
129 132
133/* KEYSC */
134static struct sh_keysc_info keysc_info = {
135 .mode = SH_KEYSC_MODE_5,
136 .scan_timing = 3,
137 .delay = 100,
138 .keycodes = {
139 KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
140 KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
141 KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
142 KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
143 KEY_WAKEUP, KEY_COFFEE, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4,
144 KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER,
145 },
146};
147
148static struct resource keysc_resources[] = {
149 [0] = {
150 .name = "KEYSC",
151 .start = 0xe61b0000,
152 .end = 0xe61b000f,
153 .flags = IORESOURCE_MEM,
154 },
155 [1] = {
156 .start = 79,
157 .flags = IORESOURCE_IRQ,
158 },
159};
160
161static struct platform_device keysc_device = {
162 .name = "sh_keysc",
163 .num_resources = ARRAY_SIZE(keysc_resources),
164 .resource = keysc_resources,
165 .dev = {
166 .platform_data = &keysc_info,
167 },
168};
169
170static struct mtd_partition nand_partition_info[] = {
171 {
172 .name = "system",
173 .offset = 0,
174 .size = 64 * 1024 * 1024,
175 },
176 {
177 .name = "userdata",
178 .offset = MTDPART_OFS_APPEND,
179 .size = 128 * 1024 * 1024,
180 },
181 {
182 .name = "cache",
183 .offset = MTDPART_OFS_APPEND,
184 .size = 64 * 1024 * 1024,
185 },
186};
187
188static struct resource nand_flash_resources[] = {
189 [0] = {
190 .start = 0xe6a30000,
191 .end = 0xe6a3009b,
192 .flags = IORESOURCE_MEM,
193 }
194};
195
196static struct sh_flctl_platform_data nand_flash_data = {
197 .parts = nand_partition_info,
198 .nr_parts = ARRAY_SIZE(nand_partition_info),
199 .flcmncr_val = QTSEL_E | FCKSEL_E | TYPESEL_SET | NANWF_E
200 | SHBUSSEL | SEL_16BIT,
201};
202
203static struct platform_device nand_flash_device = {
204 .name = "sh_flctl",
205 .resource = nand_flash_resources,
206 .num_resources = ARRAY_SIZE(nand_flash_resources),
207 .dev = {
208 .platform_data = &nand_flash_data,
209 },
210};
211
130static struct platform_device *g3evm_devices[] __initdata = { 212static struct platform_device *g3evm_devices[] __initdata = {
131 &nor_flash_device, 213 &nor_flash_device,
132 &usb_host_device, 214 &usb_host_device,
215 &keysc_device,
216 &nand_flash_device,
133}; 217};
134 218
135static struct map_desc g3evm_io_desc[] __initdata = { 219static struct map_desc g3evm_io_desc[] __initdata = {
@@ -196,6 +280,44 @@ static void __init g3evm_init(void)
196 __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ 280 __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */
197 __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ 281 __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */
198 282
283 /* KEYSC @ CN7 */
284 gpio_request(GPIO_FN_PORT42_KEYOUT0, NULL);
285 gpio_request(GPIO_FN_PORT43_KEYOUT1, NULL);
286 gpio_request(GPIO_FN_PORT44_KEYOUT2, NULL);
287 gpio_request(GPIO_FN_PORT45_KEYOUT3, NULL);
288 gpio_request(GPIO_FN_PORT46_KEYOUT4, NULL);
289 gpio_request(GPIO_FN_PORT47_KEYOUT5, NULL);
290 gpio_request(GPIO_FN_PORT48_KEYIN0_PU, NULL);
291 gpio_request(GPIO_FN_PORT49_KEYIN1_PU, NULL);
292 gpio_request(GPIO_FN_PORT50_KEYIN2_PU, NULL);
293 gpio_request(GPIO_FN_PORT55_KEYIN3_PU, NULL);
294 gpio_request(GPIO_FN_PORT56_KEYIN4_PU, NULL);
295 gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL);
296 gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL);
297
298 /* FLCTL */
299 gpio_request(GPIO_FN_FCE0, NULL);
300 gpio_request(GPIO_FN_D0_ED0_NAF0, NULL);
301 gpio_request(GPIO_FN_D1_ED1_NAF1, NULL);
302 gpio_request(GPIO_FN_D2_ED2_NAF2, NULL);
303 gpio_request(GPIO_FN_D3_ED3_NAF3, NULL);
304 gpio_request(GPIO_FN_D4_ED4_NAF4, NULL);
305 gpio_request(GPIO_FN_D5_ED5_NAF5, NULL);
306 gpio_request(GPIO_FN_D6_ED6_NAF6, NULL);
307 gpio_request(GPIO_FN_D7_ED7_NAF7, NULL);
308 gpio_request(GPIO_FN_D8_ED8_NAF8, NULL);
309 gpio_request(GPIO_FN_D9_ED9_NAF9, NULL);
310 gpio_request(GPIO_FN_D10_ED10_NAF10, NULL);
311 gpio_request(GPIO_FN_D11_ED11_NAF11, NULL);
312 gpio_request(GPIO_FN_D12_ED12_NAF12, NULL);
313 gpio_request(GPIO_FN_D13_ED13_NAF13, NULL);
314 gpio_request(GPIO_FN_D14_ED14_NAF14, NULL);
315 gpio_request(GPIO_FN_D15_ED15_NAF15, NULL);
316 gpio_request(GPIO_FN_WE0_XWR0_FWE, NULL);
317 gpio_request(GPIO_FN_FRB, NULL);
318 /* FOE, FCDE, FSC on dedicated pins */
319 __raw_writel(__raw_readl(0xe6158048) & ~(1 << 15), 0xe6158048);
320
199 sh7367_add_standard_devices(); 321 sh7367_add_standard_devices();
200 322
201 platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices)); 323 platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices));
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 5acd623f93e7..10673a90be52 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -28,6 +28,8 @@
28#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
29#include <linux/usb/r8a66597.h> 29#include <linux/usb/r8a66597.h>
30#include <linux/io.h> 30#include <linux/io.h>
31#include <linux/input.h>
32#include <linux/input/sh_keysc.h>
31#include <linux/gpio.h> 33#include <linux/gpio.h>
32#include <mach/sh7377.h> 34#include <mach/sh7377.h>
33#include <mach/common.h> 35#include <mach/common.h>
@@ -128,9 +130,49 @@ static struct platform_device usb_host_device = {
128 .resource = usb_host_resources, 130 .resource = usb_host_resources,
129}; 131};
130 132
133/* KEYSC */
134static struct sh_keysc_info keysc_info = {
135 .mode = SH_KEYSC_MODE_5,
136 .scan_timing = 3,
137 .delay = 100,
138 .keycodes = {
139 KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
140 KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L,
141 KEY_M, KEY_N, KEY_U, KEY_P, KEY_Q, KEY_R,
142 KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X,
143 KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, KEY_WAKEUP, KEY_COFFEE,
144 KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
145 KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER,
146 },
147};
148
149static struct resource keysc_resources[] = {
150 [0] = {
151 .name = "KEYSC",
152 .start = 0xe61b0000,
153 .end = 0xe61b000f,
154 .flags = IORESOURCE_MEM,
155 },
156 [1] = {
157 .start = 79,
158 .flags = IORESOURCE_IRQ,
159 },
160};
161
162static struct platform_device keysc_device = {
163 .name = "sh_keysc",
164 .id = 0, /* keysc0 clock */
165 .num_resources = ARRAY_SIZE(keysc_resources),
166 .resource = keysc_resources,
167 .dev = {
168 .platform_data = &keysc_info,
169 },
170};
171
131static struct platform_device *g4evm_devices[] __initdata = { 172static struct platform_device *g4evm_devices[] __initdata = {
132 &nor_flash_device, 173 &nor_flash_device,
133 &usb_host_device, 174 &usb_host_device,
175 &keysc_device,
134}; 176};
135 177
136static struct map_desc g4evm_io_desc[] __initdata = { 178static struct map_desc g4evm_io_desc[] __initdata = {
@@ -196,6 +238,21 @@ static void __init g4evm_init(void)
196 __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ 238 __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */
197 __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ 239 __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */
198 240
241 /* KEYSC @ CN31 */
242 gpio_request(GPIO_FN_PORT60_KEYOUT5, NULL);
243 gpio_request(GPIO_FN_PORT61_KEYOUT4, NULL);
244 gpio_request(GPIO_FN_PORT62_KEYOUT3, NULL);
245 gpio_request(GPIO_FN_PORT63_KEYOUT2, NULL);
246 gpio_request(GPIO_FN_PORT64_KEYOUT1, NULL);
247 gpio_request(GPIO_FN_PORT65_KEYOUT0, NULL);
248 gpio_request(GPIO_FN_PORT66_KEYIN0_PU, NULL);
249 gpio_request(GPIO_FN_PORT67_KEYIN1_PU, NULL);
250 gpio_request(GPIO_FN_PORT68_KEYIN2_PU, NULL);
251 gpio_request(GPIO_FN_PORT69_KEYIN3_PU, NULL);
252 gpio_request(GPIO_FN_PORT70_KEYIN4_PU, NULL);
253 gpio_request(GPIO_FN_PORT71_KEYIN5_PU, NULL);
254 gpio_request(GPIO_FN_PORT72_KEYIN6_PU, NULL);
255
199 sh7377_add_standard_devices(); 256 sh7377_add_standard_devices();
200 257
201 platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices)); 258 platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices));
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index 58bd54e1113a..bb940c6e4e6c 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -75,6 +75,11 @@ static struct clk usb0_clk = {
75 .name = "usb0", 75 .name = "usb0",
76}; 76};
77 77
78/* a static keysc0 clk for now - enough to get sh_keysc working */
79static struct clk keysc0_clk = {
80 .name = "keysc0",
81};
82
78static struct clk_lookup lookups[] = { 83static struct clk_lookup lookups[] = {
79 { 84 {
80 .clk = &peripheral_clk, 85 .clk = &peripheral_clk,
@@ -82,6 +87,8 @@ static struct clk_lookup lookups[] = {
82 .clk = &r_clk, 87 .clk = &r_clk,
83 }, { 88 }, {
84 .clk = &usb0_clk, 89 .clk = &usb0_clk,
90 }, {
91 .clk = &keysc0_clk,
85 } 92 }
86}; 93};
87 94
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index 6a547b47aabb..5ff70cadfc32 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -27,6 +27,8 @@
27 27
28enum { 28enum {
29 UNUSED_INTCA = 0, 29 UNUSED_INTCA = 0,
30 ENABLED,
31 DISABLED,
30 32
31 /* interrupt sources INTCA */ 33 /* interrupt sources INTCA */
32 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, 34 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -46,8 +48,8 @@ enum {
46 MSIOF2, MSIOF1, 48 MSIOF2, MSIOF1,
47 SCIFA4, SCIFA5, SCIFB, 49 SCIFA4, SCIFA5, SCIFB,
48 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 50 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
49 SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, 51 SDHI0,
50 SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, 52 SDHI1,
51 MSU_MSU, MSU_MSU2, 53 MSU_MSU, MSU_MSU2,
52 IREM, 54 IREM,
53 SIU, 55 SIU,
@@ -59,7 +61,7 @@ enum {
59 TTI20, 61 TTI20,
60 MISTY, 62 MISTY,
61 DDM, 63 DDM,
62 SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, 64 SDHI2,
63 RWDT0, RWDT1, 65 RWDT0, RWDT1,
64 DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3, 66 DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3,
65 DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR, 67 DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR,
@@ -70,7 +72,7 @@ enum {
70 72
71 /* interrupt groups INTCA */ 73 /* interrupt groups INTCA */
72 DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, 74 DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2,
73 ETM11, ARM11, USBHS, FLCTL, IIC1, SDHI0, SDHI1, SDHI2, 75 ETM11, ARM11, USBHS, FLCTL, IIC1
74}; 76};
75 77
76static struct intc_vect intca_vectors[] = { 78static struct intc_vect intca_vectors[] = {
@@ -105,10 +107,10 @@ static struct intc_vect intca_vectors[] = {
105 INTC_VECT(SCIFB, 0x0d60), 107 INTC_VECT(SCIFB, 0x0d60),
106 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 108 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
107 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), 109 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
108 INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), 110 INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
109 INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), 111 INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
110 INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), 112 INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
111 INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), 113 INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0),
112 INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), 114 INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40),
113 INTC_VECT(IREM, 0x0f60), 115 INTC_VECT(IREM, 0x0f60),
114 INTC_VECT(SIU, 0x0fa0), 116 INTC_VECT(SIU, 0x0fa0),
@@ -122,8 +124,8 @@ static struct intc_vect intca_vectors[] = {
122 INTC_VECT(TTI20, 0x1100), 124 INTC_VECT(TTI20, 0x1100),
123 INTC_VECT(MISTY, 0x1120), 125 INTC_VECT(MISTY, 0x1120),
124 INTC_VECT(DDM, 0x1140), 126 INTC_VECT(DDM, 0x1140),
125 INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), 127 INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220),
126 INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), 128 INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260),
127 INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0), 129 INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0),
128 INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020), 130 INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020),
129 INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060), 131 INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060),
@@ -158,12 +160,6 @@ static struct intc_group intca_groups[] __initdata = {
158 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, 160 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
159 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 161 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
160 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), 162 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
161 INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
162 SDHI0_SDHI0I2, SDHI0_SDHI0I3),
163 INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
164 SDHI1_SDHI1I2, SDHI1_SDHI1I3),
165 INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
166 SDHI2_SDHI2I2, SDHI2_SDHI2I3),
167}; 163};
168 164
169static struct intc_mask_reg intca_mask_registers[] = { 165static struct intc_mask_reg intca_mask_registers[] = {
@@ -193,10 +189,10 @@ static struct intc_mask_reg intca_mask_registers[] = {
193 { SCIFB, SCIFA5, SCIFA4, MSIOF1, 189 { SCIFB, SCIFA5, SCIFA4, MSIOF1,
194 0, 0, MSIOF2, 0 } }, 190 0, 0, MSIOF2, 0 } },
195 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ 191 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
196 { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, 192 { DISABLED, DISABLED, ENABLED, ENABLED,
197 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 193 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
198 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ 194 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
199 { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, 195 { DISABLED, DISABLED, ENABLED, ENABLED,
200 TTI20, USBDMAC_USHDMI, SPU, SIU } }, 196 TTI20, USBDMAC_USHDMI, SPU, SIU } },
201 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ 197 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
202 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, 198 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -211,7 +207,7 @@ static struct intc_mask_reg intca_mask_registers[] = {
211 { 0, 0, TPU0, TPU1, 207 { 0, 0, TPU0, TPU1,
212 TPU2, TPU3, TPU4, 0 } }, 208 TPU2, TPU3, TPU4, 0 } },
213 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ 209 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
214 { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, 210 { DISABLED, DISABLED, ENABLED, ENABLED,
215 MISTY, CMT3, RWDT1, RWDT0 } }, 211 MISTY, CMT3, RWDT1, RWDT0 } },
216}; 212};
217 213
@@ -258,10 +254,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
258 { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, 254 { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } },
259}; 255};
260 256
261static DECLARE_INTC_DESC_ACK(intca_desc, "sh7367-intca", 257static struct intc_desc intca_desc __initdata = {
262 intca_vectors, intca_groups, 258 .name = "sh7367-intca",
263 intca_mask_registers, intca_prio_registers, 259 .force_enable = ENABLED,
264 intca_sense_registers, intca_ack_registers); 260 .force_disable = DISABLED,
261 .hw = INTC_HW_DESC(intca_vectors, intca_groups,
262 intca_mask_registers, intca_prio_registers,
263 intca_sense_registers, intca_ack_registers),
264};
265 265
266void __init sh7367_init_irq(void) 266void __init sh7367_init_irq(void)
267{ 267{
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index c57a923f97a6..3ce9d9bd5899 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -27,6 +27,8 @@
27 27
28enum { 28enum {
29 UNUSED_INTCA = 0, 29 UNUSED_INTCA = 0,
30 ENABLED,
31 DISABLED,
30 32
31 /* interrupt sources INTCA */ 33 /* interrupt sources INTCA */
32 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, 34 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -47,14 +49,14 @@ enum {
47 MSIOF2, MSIOF1, 49 MSIOF2, MSIOF1,
48 SCIFA4, SCIFA5, SCIFB, 50 SCIFA4, SCIFA5, SCIFB,
49 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 51 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
50 SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, 52 SDHI0,
51 SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, 53 SDHI1,
52 IRREM, 54 IRREM,
53 IRDA, 55 IRDA,
54 TPU0, 56 TPU0,
55 TTI20, 57 TTI20,
56 DDM, 58 DDM,
57 SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, 59 SDHI2,
58 RWDT0, 60 RWDT0,
59 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, 61 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
60 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, 62 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
@@ -82,7 +84,7 @@ enum {
82 84
83 /* interrupt groups INTCA */ 85 /* interrupt groups INTCA */
84 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, 86 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
85 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2 87 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1
86}; 88};
87 89
88static struct intc_vect intca_vectors[] __initdata = { 90static struct intc_vect intca_vectors[] __initdata = {
@@ -123,17 +125,17 @@ static struct intc_vect intca_vectors[] __initdata = {
123 INTC_VECT(SCIFB, 0x0d60), 125 INTC_VECT(SCIFB, 0x0d60),
124 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 126 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
125 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), 127 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
126 INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), 128 INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
127 INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), 129 INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
128 INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), 130 INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
129 INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), 131 INTC_VECT(SDHI1, 0x0ec0),
130 INTC_VECT(IRREM, 0x0f60), 132 INTC_VECT(IRREM, 0x0f60),
131 INTC_VECT(IRDA, 0x0480), 133 INTC_VECT(IRDA, 0x0480),
132 INTC_VECT(TPU0, 0x04a0), 134 INTC_VECT(TPU0, 0x04a0),
133 INTC_VECT(TTI20, 0x1100), 135 INTC_VECT(TTI20, 0x1100),
134 INTC_VECT(DDM, 0x1140), 136 INTC_VECT(DDM, 0x1140),
135 INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), 137 INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220),
136 INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), 138 INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260),
137 INTC_VECT(RWDT0, 0x1280), 139 INTC_VECT(RWDT0, 0x1280),
138 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), 140 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020),
139 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), 141 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060),
@@ -193,12 +195,6 @@ static struct intc_group intca_groups[] __initdata = {
193 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, 195 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
194 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 196 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
195 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), 197 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
196 INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
197 SDHI0_SDHI0I2, SDHI0_SDHI0I3),
198 INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
199 SDHI1_SDHI1I2),
200 INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
201 SDHI2_SDHI2I2, SDHI2_SDHI2I3),
202 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), 198 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
203}; 199};
204 200
@@ -234,10 +230,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
234 { SCIFB, SCIFA5, SCIFA4, MSIOF1, 230 { SCIFB, SCIFA5, SCIFA4, MSIOF1,
235 0, 0, MSIOF2, 0 } }, 231 0, 0, MSIOF2, 0 } },
236 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ 232 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
237 { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, 233 { DISABLED, DISABLED, ENABLED, ENABLED,
238 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 234 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
239 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ 235 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
240 { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, 236 { 0, DISABLED, ENABLED, ENABLED,
241 TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, 237 TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
242 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ 238 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
243 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, 239 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -252,7 +248,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
252 { 0, 0, TPU0, 0, 248 { 0, 0, TPU0, 0,
253 0, 0, 0, 0 } }, 249 0, 0, 0, 0 } },
254 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ 250 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
255 { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, 251 { DISABLED, DISABLED, ENABLED, ENABLED,
256 0, CMT3, 0, RWDT0 } }, 252 0, CMT3, 0, RWDT0 } },
257 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ 253 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */
258 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, 254 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
@@ -358,10 +354,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
358 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, 354 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
359}; 355};
360 356
361static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca", 357static struct intc_desc intca_desc __initdata = {
362 intca_vectors, intca_groups, 358 .name = "sh7372-intca",
363 intca_mask_registers, intca_prio_registers, 359 .force_enable = ENABLED,
364 intca_sense_registers, intca_ack_registers); 360 .force_disable = DISABLED,
361 .hw = INTC_HW_DESC(intca_vectors, intca_groups,
362 intca_mask_registers, intca_prio_registers,
363 intca_sense_registers, intca_ack_registers),
364};
365 365
366void __init sh7372_init_irq(void) 366void __init sh7372_init_irq(void)
367{ 367{
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 125021cfba5c..5c781e2d1897 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -27,6 +27,8 @@
27 27
28enum { 28enum {
29 UNUSED_INTCA = 0, 29 UNUSED_INTCA = 0,
30 ENABLED,
31 DISABLED,
30 32
31 /* interrupt sources INTCA */ 33 /* interrupt sources INTCA */
32 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, 34 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -49,8 +51,8 @@ enum {
49 MSIOF2, MSIOF1, 51 MSIOF2, MSIOF1,
50 SCIFA4, SCIFA5, SCIFB, 52 SCIFA4, SCIFA5, SCIFB,
51 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 53 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
52 SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, 54 SDHI0,
53 SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, 55 SDHI1,
54 MSU_MSU, MSU_MSU2, 56 MSU_MSU, MSU_MSU2,
55 IRREM, 57 IRREM,
56 MSUG, 58 MSUG,
@@ -84,7 +86,7 @@ enum {
84 86
85 /* interrupt groups INTCA */ 87 /* interrupt groups INTCA */
86 DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, 88 DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
87 AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, SDHI0, SDHI1, 89 AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1,
88 ICUSB, ICUDMC 90 ICUSB, ICUDMC
89}; 91};
90 92
@@ -128,10 +130,10 @@ static struct intc_vect intca_vectors[] = {
128 INTC_VECT(SCIFB, 0x0d60), 130 INTC_VECT(SCIFB, 0x0d60),
129 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 131 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
130 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), 132 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
131 INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), 133 INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20),
132 INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), 134 INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60),
133 INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), 135 INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0),
134 INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), 136 INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0),
135 INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), 137 INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40),
136 INTC_VECT(IRREM, 0x0f60), 138 INTC_VECT(IRREM, 0x0f60),
137 INTC_VECT(MSUG, 0x0fa0), 139 INTC_VECT(MSUG, 0x0fa0),
@@ -195,10 +197,6 @@ static struct intc_group intca_groups[] __initdata = {
195 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, 197 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
196 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 198 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
197 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), 199 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
198 INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
199 SDHI0_SDHI0I2, SDHI0_SDHI0I3),
200 INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
201 SDHI1_SDHI1I2, SDHI1_SDHI1I3),
202 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), 200 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
203 INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1), 201 INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1),
204 INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2), 202 INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2),
@@ -236,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] = {
236 { SCIFB, SCIFA5, SCIFA4, MSIOF1, 234 { SCIFB, SCIFA5, SCIFA4, MSIOF1,
237 0, 0, MSIOF2, 0 } }, 235 0, 0, MSIOF2, 0 } },
238 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ 236 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
239 { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, 237 { DISABLED, DISABLED, ENABLED, ENABLED,
240 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 238 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
241 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ 239 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
242 { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, 240 { DISABLED, DISABLED, ENABLED, ENABLED,
243 TTI20, USBDMAC_USHDMI, 0, MSUG } }, 241 TTI20, USBDMAC_USHDMI, 0, MSUG } },
244 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ 242 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
245 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, 243 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -339,10 +337,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
339 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, 337 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
340}; 338};
341 339
342static DECLARE_INTC_DESC_ACK(intca_desc, "sh7377-intca", 340static struct intc_desc intca_desc __initdata = {
343 intca_vectors, intca_groups, 341 .name = "sh7377-intca",
344 intca_mask_registers, intca_prio_registers, 342 .force_enable = ENABLED,
345 intca_sense_registers, intca_ack_registers); 343 .force_disable = DISABLED,
344 .hw = INTC_HW_DESC(intca_vectors, intca_groups,
345 intca_mask_registers, intca_prio_registers,
346 intca_sense_registers, intca_ack_registers),
347};
346 348
347void __init sh7377_init_irq(void) 349void __init sh7377_init_irq(void)
348{ 350{
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 337199ed3479..76a347b3ce07 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -2140,18 +2140,18 @@ void omap2_gpio_resume_after_retention(void)
2140 if (gen) { 2140 if (gen) {
2141 u32 old0, old1; 2141 u32 old0, old1;
2142 2142
2143 if (cpu_is_omap24xx() || cpu_is_omap44xx()) { 2143 if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
2144 old0 = __raw_readl(bank->base + 2144 old0 = __raw_readl(bank->base +
2145 OMAP24XX_GPIO_LEVELDETECT0); 2145 OMAP24XX_GPIO_LEVELDETECT0);
2146 old1 = __raw_readl(bank->base + 2146 old1 = __raw_readl(bank->base +
2147 OMAP24XX_GPIO_LEVELDETECT1); 2147 OMAP24XX_GPIO_LEVELDETECT1);
2148 __raw_writel(old0 | gen, bank->base + 2148 __raw_writel(old0 | gen, bank->base +
2149 OMAP24XX_GPIO_LEVELDETECT0); 2149 OMAP24XX_GPIO_LEVELDETECT0);
2150 __raw_writel(old1 | gen, bank->base + 2150 __raw_writel(old1 | gen, bank->base +
2151 OMAP24XX_GPIO_LEVELDETECT1); 2151 OMAP24XX_GPIO_LEVELDETECT1);
2152 __raw_writel(old0, bank->base + 2152 __raw_writel(old0, bank->base +
2153 OMAP24XX_GPIO_LEVELDETECT0); 2153 OMAP24XX_GPIO_LEVELDETECT0);
2154 __raw_writel(old1, bank->base + 2154 __raw_writel(old1, bank->base +
2155 OMAP24XX_GPIO_LEVELDETECT1); 2155 OMAP24XX_GPIO_LEVELDETECT1);
2156 } 2156 }
2157 2157
diff --git a/arch/arm/plat-omap/include/plat/blizzard.h b/arch/arm/plat-omap/include/plat/blizzard.h
index 8d160f171372..56e7f2e7d12f 100644
--- a/arch/arm/plat-omap/include/plat/blizzard.h
+++ b/arch/arm/plat-omap/include/plat/blizzard.h
@@ -6,7 +6,7 @@ struct blizzard_platform_data {
6 void (*power_down)(struct device *dev); 6 void (*power_down)(struct device *dev);
7 unsigned long (*get_clock_rate)(struct device *dev); 7 unsigned long (*get_clock_rate)(struct device *dev);
8 8
9 unsigned te_connected : 1; 9 unsigned te_connected:1;
10}; 10};
11 11
12#endif 12#endif
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index ed8786c41df2..75141742300c 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -167,10 +167,14 @@ IS_OMAP_SUBCLASS(443x, 0x443)
167#if defined(MULTI_OMAP2) 167#if defined(MULTI_OMAP2)
168# if defined(CONFIG_ARCH_OMAP2) 168# if defined(CONFIG_ARCH_OMAP2)
169# undef cpu_is_omap24xx 169# undef cpu_is_omap24xx
170# undef cpu_is_omap242x
171# undef cpu_is_omap243x
172# define cpu_is_omap24xx() is_omap24xx() 170# define cpu_is_omap24xx() is_omap24xx()
171# endif
172# if defined (CONFIG_ARCH_OMAP2420)
173# undef cpu_is_omap242x
173# define cpu_is_omap242x() is_omap242x() 174# define cpu_is_omap242x() is_omap242x()
175# endif
176# if defined (CONFIG_ARCH_OMAP2430)
177# undef cpu_is_omap243x
174# define cpu_is_omap243x() is_omap243x() 178# define cpu_is_omap243x() is_omap243x()
175# endif 179# endif
176# if defined(CONFIG_ARCH_OMAP3) 180# if defined(CONFIG_ARCH_OMAP3)
diff --git a/arch/arm/plat-omap/include/plat/prcm.h b/arch/arm/plat-omap/include/plat/prcm.h
index d6a0e27d5a7f..9fbd91419cd1 100644
--- a/arch/arm/plat-omap/include/plat/prcm.h
+++ b/arch/arm/plat-omap/include/plat/prcm.h
@@ -24,7 +24,7 @@
24#define __ASM_ARM_ARCH_OMAP_PRCM_H 24#define __ASM_ARM_ARCH_OMAP_PRCM_H
25 25
26u32 omap_prcm_get_reset_sources(void); 26u32 omap_prcm_get_reset_sources(void);
27void omap_prcm_arch_reset(char mode); 27void omap_prcm_arch_reset(char mode, const char *cmd);
28int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest, 28int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest,
29 const char *name); 29 const char *name);
30 30
diff --git a/arch/arm/plat-omap/include/plat/system.h b/arch/arm/plat-omap/include/plat/system.h
index c58a4ef42a45..d0a119f735b4 100644
--- a/arch/arm/plat-omap/include/plat/system.h
+++ b/arch/arm/plat-omap/include/plat/system.h
@@ -22,7 +22,7 @@ static inline void arch_idle(void)
22 cpu_do_idle(); 22 cpu_do_idle();
23} 23}
24 24
25static inline void omap1_arch_reset(char mode) 25static inline void omap1_arch_reset(char mode, const char *cmd)
26{ 26{
27 /* 27 /*
28 * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 28 * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
@@ -43,9 +43,9 @@ static inline void omap1_arch_reset(char mode)
43static inline void arch_reset(char mode, const char *cmd) 43static inline void arch_reset(char mode, const char *cmd)
44{ 44{
45 if (!cpu_class_is_omap2()) 45 if (!cpu_class_is_omap2())
46 omap1_arch_reset(mode); 46 omap1_arch_reset(mode, cmd);
47 else 47 else
48 omap_prcm_arch_reset(mode); 48 omap_prcm_arch_reset(mode, cmd);
49} 49}
50 50
51#endif 51#endif
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 288e29e1c06f..568578db93b6 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -53,7 +53,7 @@ enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
53 53
54extern void usb_musb_init(struct omap_musb_board_data *board_data); 54extern void usb_musb_init(struct omap_musb_board_data *board_data);
55 55
56extern void usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata); 56extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata);
57 57
58#endif 58#endif
59 59
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index e47686e0a633..52dfcc81511e 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -133,8 +133,7 @@ static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
133 dev_err(mcbsp_tx->dev, "TX Frame Sync Error! : 0x%x\n", 133 dev_err(mcbsp_tx->dev, "TX Frame Sync Error! : 0x%x\n",
134 irqst_spcr2); 134 irqst_spcr2);
135 /* Writing zero to XSYNC_ERR clears the IRQ */ 135 /* Writing zero to XSYNC_ERR clears the IRQ */
136 MCBSP_WRITE(mcbsp_tx, SPCR2, 136 MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2));
137 MCBSP_READ_CACHE(mcbsp_tx, SPCR2) & ~(XSYNC_ERR));
138 } else { 137 } else {
139 complete(&mcbsp_tx->tx_irq_completion); 138 complete(&mcbsp_tx->tx_irq_completion);
140 } 139 }
@@ -154,8 +153,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
154 dev_err(mcbsp_rx->dev, "RX Frame Sync Error! : 0x%x\n", 153 dev_err(mcbsp_rx->dev, "RX Frame Sync Error! : 0x%x\n",
155 irqst_spcr1); 154 irqst_spcr1);
156 /* Writing zero to RSYNC_ERR clears the IRQ */ 155 /* Writing zero to RSYNC_ERR clears the IRQ */
157 MCBSP_WRITE(mcbsp_rx, SPCR1, 156 MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
158 MCBSP_READ_CACHE(mcbsp_rx, SPCR1) & ~(RSYNC_ERR));
159 } else { 157 } else {
160 complete(&mcbsp_rx->tx_irq_completion); 158 complete(&mcbsp_rx->tx_irq_completion);
161 } 159 }
@@ -934,8 +932,7 @@ int omap_mcbsp_pollwrite(unsigned int id, u16 buf)
934 /* if frame sync error - clear the error */ 932 /* if frame sync error - clear the error */
935 if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) { 933 if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) {
936 /* clear error */ 934 /* clear error */
937 MCBSP_WRITE(mcbsp, SPCR2, 935 MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
938 MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XSYNC_ERR));
939 /* resend */ 936 /* resend */
940 return -1; 937 return -1;
941 } else { 938 } else {
@@ -975,8 +972,7 @@ int omap_mcbsp_pollread(unsigned int id, u16 *buf)
975 /* if frame sync error - clear the error */ 972 /* if frame sync error - clear the error */
976 if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) { 973 if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) {
977 /* clear error */ 974 /* clear error */
978 MCBSP_WRITE(mcbsp, SPCR1, 975 MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
979 MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RSYNC_ERR));
980 /* resend */ 976 /* resend */
981 return -1; 977 return -1;
982 } else { 978 } else {
diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h
index e87ce8ffbbcd..7d6ed7263d57 100644
--- a/arch/arm/plat-samsung/include/plat/uncompress.h
+++ b/arch/arm/plat-samsung/include/plat/uncompress.h
@@ -140,8 +140,6 @@ static void arch_decomp_error(const char *x)
140#define arch_error arch_decomp_error 140#define arch_error arch_decomp_error
141#endif 141#endif
142 142
143static void error(char *err);
144
145#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO 143#ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO
146static inline void arch_enable_uart_fifo(void) 144static inline void arch_enable_uart_fifo(void)
147{ 145{
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index ef019f27b67d..f2d11390d01c 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -379,6 +379,39 @@ static int __devexit s3c_pwm_remove(struct platform_device *pdev)
379 return 0; 379 return 0;
380} 380}
381 381
382#ifdef CONFIG_PM
383static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state)
384{
385 struct pwm_device *pwm = platform_get_drvdata(pdev);
386
387 /* No one preserve these values during suspend so reset them
388 * Otherwise driver leaves PWM unconfigured if same values
389 * passed to pwm_config
390 */
391 pwm->period_ns = 0;
392 pwm->duty_ns = 0;
393
394 return 0;
395}
396
397static int s3c_pwm_resume(struct platform_device *pdev)
398{
399 struct pwm_device *pwm = platform_get_drvdata(pdev);
400 unsigned long tcon;
401
402 /* Restore invertion */
403 tcon = __raw_readl(S3C2410_TCON);
404 tcon |= pwm_tcon_invert(pwm);
405 __raw_writel(tcon, S3C2410_TCON);
406
407 return 0;
408}
409
410#else
411#define s3c_pwm_suspend NULL
412#define s3c_pwm_resume NULL
413#endif
414
382static struct platform_driver s3c_pwm_driver = { 415static struct platform_driver s3c_pwm_driver = {
383 .driver = { 416 .driver = {
384 .name = "s3c24xx-pwm", 417 .name = "s3c24xx-pwm",
@@ -386,6 +419,8 @@ static struct platform_driver s3c_pwm_driver = {
386 }, 419 },
387 .probe = s3c_pwm_probe, 420 .probe = s3c_pwm_probe,
388 .remove = __devexit_p(s3c_pwm_remove), 421 .remove = __devexit_p(s3c_pwm_remove),
422 .suspend = s3c_pwm_suspend,
423 .resume = s3c_pwm_resume,
389}; 424};
390 425
391static int __init pwm_init(void) 426static int __init pwm_init(void)
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 53c1e1d45c68..c078849df7f9 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -23,12 +23,15 @@ config RWSEM_XCHGADD_ALGORITHM
23 23
24config BLACKFIN 24config BLACKFIN
25 def_bool y 25 def_bool y
26 select HAVE_ARCH_KGDB
27 select HAVE_ARCH_TRACEHOOK
26 select HAVE_FUNCTION_GRAPH_TRACER 28 select HAVE_FUNCTION_GRAPH_TRACER
27 select HAVE_FUNCTION_TRACER 29 select HAVE_FUNCTION_TRACER
30 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
28 select HAVE_IDE 31 select HAVE_IDE
29 select HAVE_KERNEL_GZIP 32 select HAVE_KERNEL_GZIP if RAMKERNEL
30 select HAVE_KERNEL_BZIP2 33 select HAVE_KERNEL_BZIP2 if RAMKERNEL
31 select HAVE_KERNEL_LZMA 34 select HAVE_KERNEL_LZMA if RAMKERNEL
32 select HAVE_OPROFILE 35 select HAVE_OPROFILE
33 select ARCH_WANT_OPTIONAL_GPIOLIB 36 select ARCH_WANT_OPTIONAL_GPIOLIB
34 37
@@ -45,9 +48,6 @@ config ZONE_DMA
45config GENERIC_FIND_NEXT_BIT 48config GENERIC_FIND_NEXT_BIT
46 def_bool y 49 def_bool y
47 50
48config GENERIC_HWEIGHT
49 def_bool y
50
51config GENERIC_HARDIRQS 51config GENERIC_HARDIRQS
52 def_bool y 52 def_bool y
53 53
@@ -239,7 +239,7 @@ endchoice
239 239
240config SMP 240config SMP
241 depends on BF561 241 depends on BF561
242 select GENERIC_CLOCKEVENTS 242 select TICKSOURCE_CORETMR
243 bool "Symmetric multi-processing support" 243 bool "Symmetric multi-processing support"
244 ---help--- 244 ---help---
245 This enables support for systems with more than one CPU, 245 This enables support for systems with more than one CPU,
@@ -253,11 +253,20 @@ config NR_CPUS
253 depends on SMP 253 depends on SMP
254 default 2 if BF561 254 default 2 if BF561
255 255
256config HOTPLUG_CPU
257 bool "Support for hot-pluggable CPUs"
258 depends on SMP && HOTPLUG
259 default y
260
256config IRQ_PER_CPU 261config IRQ_PER_CPU
257 bool 262 bool
258 depends on SMP 263 depends on SMP
259 default y 264 default y
260 265
266config HAVE_LEGACY_PER_CPU_AREA
267 def_bool y
268 depends on SMP
269
261config BF_REV_MIN 270config BF_REV_MIN
262 int 271 int
263 default 0 if (BF51x || BF52x || (BF54x && !BF54xM)) 272 default 0 if (BF51x || BF52x || (BF54x && !BF54xM))
@@ -349,7 +358,7 @@ config MEM_MT48LC8M32B2B5_7
349 358
350config MEM_MT48LC32M16A2TG_75 359config MEM_MT48LC32M16A2TG_75
351 bool 360 bool
352 depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP) 361 depends on (BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN532_IP0X || BLACKSTAMP)
353 default y 362 default y
354 363
355config MEM_MT48LC32M8A2_75 364config MEM_MT48LC32M8A2_75
@@ -401,10 +410,18 @@ config BOOT_LOAD
401config ROM_BASE 410config ROM_BASE
402 hex "Kernel ROM Base" 411 hex "Kernel ROM Base"
403 depends on ROMKERNEL 412 depends on ROMKERNEL
404 default "0x20040000" 413 default "0x20040040"
405 range 0x20000000 0x20400000 if !(BF54x || BF561) 414 range 0x20000000 0x20400000 if !(BF54x || BF561)
406 range 0x20000000 0x30000000 if (BF54x || BF561) 415 range 0x20000000 0x30000000 if (BF54x || BF561)
407 help 416 help
417 Make sure your ROM base does not include any file-header
418 information that is prepended to the kernel.
419
420 For example, the bootable U-Boot format (created with
421 mkimage) has a 64 byte header (0x40). So while the image
422 you write to flash might start at say 0x20080000, you have
423 to add 0x40 to get the kernel's ROM base as it will come
424 after the header.
408 425
409comment "Clock/PLL Setup" 426comment "Clock/PLL Setup"
410 427
@@ -448,7 +465,7 @@ config VCO_MULT
448 range 1 64 465 range 1 64
449 default "22" if BFIN533_EZKIT 466 default "22" if BFIN533_EZKIT
450 default "45" if BFIN533_STAMP 467 default "45" if BFIN533_STAMP
451 default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT) 468 default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT)
452 default "22" if BFIN533_BLUETECHNIX_CM 469 default "22" if BFIN533_BLUETECHNIX_CM
453 default "20" if (BFIN537_BLUETECHNIX_CM_E || BFIN537_BLUETECHNIX_CM_U || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM) 470 default "20" if (BFIN537_BLUETECHNIX_CM_E || BFIN537_BLUETECHNIX_CM_U || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM)
454 default "20" if BFIN561_EZKIT 471 default "20" if BFIN561_EZKIT
@@ -609,23 +626,23 @@ config GENERIC_CLOCKEVENTS
609 bool "Generic clock events" 626 bool "Generic clock events"
610 default y 627 default y
611 628
612choice 629menu "Clock event device"
613 prompt "Kernel Tick Source"
614 depends on GENERIC_CLOCKEVENTS 630 depends on GENERIC_CLOCKEVENTS
615 default TICKSOURCE_CORETMR
616
617config TICKSOURCE_GPTMR0 631config TICKSOURCE_GPTMR0
618 bool "Gptimer0 (SCLK domain)" 632 bool "GPTimer0"
633 depends on !SMP
619 select BFIN_GPTIMERS 634 select BFIN_GPTIMERS
620 635
621config TICKSOURCE_CORETMR 636config TICKSOURCE_CORETMR
622 bool "Core timer (CCLK domain)" 637 bool "Core timer"
623 638 default y
624endchoice 639endmenu
625 640
626config CYCLES_CLOCKSOURCE 641menu "Clock souce"
627 bool "Use 'CYCLES' as a clocksource"
628 depends on GENERIC_CLOCKEVENTS 642 depends on GENERIC_CLOCKEVENTS
643config CYCLES_CLOCKSOURCE
644 bool "CYCLES"
645 default y
629 depends on !BFIN_SCRATCH_REG_CYCLES 646 depends on !BFIN_SCRATCH_REG_CYCLES
630 depends on !SMP 647 depends on !SMP
631 help 648 help
@@ -636,10 +653,10 @@ config CYCLES_CLOCKSOURCE
636 writing the registers will most likely crash the kernel. 653 writing the registers will most likely crash the kernel.
637 654
638config GPTMR0_CLOCKSOURCE 655config GPTMR0_CLOCKSOURCE
639 bool "Use GPTimer0 as a clocksource" 656 bool "GPTimer0"
640 select BFIN_GPTIMERS 657 select BFIN_GPTIMERS
641 depends on GENERIC_CLOCKEVENTS
642 depends on !TICKSOURCE_GPTMR0 658 depends on !TICKSOURCE_GPTMR0
659endmenu
643 660
644config ARCH_USES_GETTIMEOFFSET 661config ARCH_USES_GETTIMEOFFSET
645 depends on !GENERIC_CLOCKEVENTS 662 depends on !GENERIC_CLOCKEVENTS
@@ -1116,24 +1133,6 @@ config PCI
1116 1133
1117source "drivers/pci/Kconfig" 1134source "drivers/pci/Kconfig"
1118 1135
1119config HOTPLUG
1120 bool "Support for hot-pluggable device"
1121 help
1122 Say Y here if you want to plug devices into your computer while
1123 the system is running, and be able to use them quickly. In many
1124 cases, the devices can likewise be unplugged at any time too.
1125
1126 One well known example of this is PCMCIA- or PC-cards, credit-card
1127 size devices such as network cards, modems or hard drives which are
1128 plugged into slots found on all modern laptop computers. Another
1129 example, used on modern desktops as well as laptops, is USB.
1130
1131 Enable HOTPLUG and build a modular kernel. Get agent software
1132 (from <http://linux-hotplug.sourceforge.net/>) and install it.
1133 Then your kernel will automatically call out to a user mode "policy
1134 agent" (/sbin/hotplug) to load modules and set up software needed
1135 to use devices as you hotplug them.
1136
1137source "drivers/pcmcia/Kconfig" 1136source "drivers/pcmcia/Kconfig"
1138 1137
1139source "drivers/pci/hotplug/Kconfig" 1138source "drivers/pci/hotplug/Kconfig"
@@ -1147,7 +1146,6 @@ source "fs/Kconfig.binfmt"
1147endmenu 1146endmenu
1148 1147
1149menu "Power management options" 1148menu "Power management options"
1150 depends on !SMP
1151 1149
1152source "kernel/power/Kconfig" 1150source "kernel/power/Kconfig"
1153 1151
@@ -1240,7 +1238,6 @@ config PM_BFIN_WAKE_GP
1240endmenu 1238endmenu
1241 1239
1242menu "CPU Frequency scaling" 1240menu "CPU Frequency scaling"
1243 depends on !SMP
1244 1241
1245source "drivers/cpufreq/Kconfig" 1242source "drivers/cpufreq/Kconfig"
1246 1243
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 87f195ee2e06..aec89a5280b2 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -18,9 +18,6 @@ config DEBUG_STACK_USAGE
18 18
19 This option will slow down process creation somewhat. 19 This option will slow down process creation somewhat.
20 20
21config HAVE_ARCH_KGDB
22 def_bool y
23
24config DEBUG_VERBOSE 21config DEBUG_VERBOSE
25 bool "Verbose fault messages" 22 bool "Verbose fault messages"
26 default y 23 default y
@@ -238,6 +235,15 @@ config EARLY_PRINTK
238 all of this lives in the init section and is thrown away after the 235 all of this lives in the init section and is thrown away after the
239 kernel boots completely. 236 kernel boots completely.
240 237
238config NMI_WATCHDOG
239 bool "Enable NMI watchdog to help debugging lockup on SMP"
240 default n
241 depends on (SMP && !BFIN_SCRATCH_REG_RETN)
242 help
243 If any CPU in the system does not execute the period local timer
244 interrupt for more than 5 seconds, then the NMI handler dumps debug
245 information. This information can be used to debug the lockup.
246
241config CPLB_INFO 247config CPLB_INFO
242 bool "Display the CPLB information" 248 bool "Display the CPLB information"
243 help 249 help
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d4c7177e7656..5a97a31d4bbd 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -14,6 +14,9 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -S
14GZFLAGS := -9 14GZFLAGS := -9
15 15
16KBUILD_CFLAGS += $(call cc-option,-mno-fdpic) 16KBUILD_CFLAGS += $(call cc-option,-mno-fdpic)
17ifeq ($(CONFIG_ROMKERNEL),y)
18KBUILD_CFLAGS += -mlong-calls
19endif
17KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) 20KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
18CFLAGS_MODULE += -mlong-calls 21CFLAGS_MODULE += -mlong-calls
19LDFLAGS_MODULE += -m elf32bfin 22LDFLAGS_MODULE += -m elf32bfin
@@ -130,7 +133,6 @@ KBUILD_CFLAGS += -Iarch/$(ARCH)/mach-$(MACHINE)/include
130KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs)) 133KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs))
131 134
132CLEAN_FILES += \ 135CLEAN_FILES += \
133 arch/$(ARCH)/include/asm/asm-offsets.h \
134 arch/$(ARCH)/kernel/asm-offsets.s \ 136 arch/$(ARCH)/kernel/asm-offsets.s \
135 137
136archclean: 138archclean:
@@ -138,7 +140,7 @@ archclean:
138 140
139INSTALL_PATH ?= /tftpboot 141INSTALL_PATH ?= /tftpboot
140boot := arch/$(ARCH)/boot 142boot := arch/$(ARCH)/boot
141BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma 143BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip
142PHONY += $(BOOT_TARGETS) install 144PHONY += $(BOOT_TARGETS) install
143KBUILD_IMAGE := $(boot)/vmImage 145KBUILD_IMAGE := $(boot)/vmImage
144 146
@@ -156,6 +158,7 @@ define archhelp
156 echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)' 158 echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
157 echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)' 159 echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
158 echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)' 160 echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
161 echo ' vmImage.xip - XIP Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.xip)'
159 echo ' install - Install kernel using' 162 echo ' install - Install kernel using'
160 echo ' (your) ~/bin/$(INSTALLKERNEL) or' 163 echo ' (your) ~/bin/$(INSTALLKERNEL) or'
161 echo ' (distribution) PATH: $(INSTALLKERNEL) or' 164 echo ' (distribution) PATH: $(INSTALLKERNEL) or'
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index e9c48c6f8c1f..d1b3d6051fdf 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -8,14 +8,18 @@
8 8
9MKIMAGE := $(srctree)/scripts/mkuboot.sh 9MKIMAGE := $(srctree)/scripts/mkuboot.sh
10 10
11targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma 11targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip
12extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma 12extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xip
13
14UIMAGE_OPTS-y :=
15UIMAGE_OPTS-$(CONFIG_RAMKERNEL) += -a $(CONFIG_BOOT_LOAD)
16UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x
13 17
14quiet_cmd_uimage = UIMAGE $@ 18quiet_cmd_uimage = UIMAGE $@
15 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ 19 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
16 -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ 20 -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
17 -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ 21 -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
18 -d $< $@ 22 $(UIMAGE_OPTS-y) -d $< $@
19 23
20$(obj)/vmlinux.bin: vmlinux FORCE 24$(obj)/vmlinux.bin: vmlinux FORCE
21 $(call if_changed,objcopy) 25 $(call if_changed,objcopy)
@@ -29,6 +33,12 @@ $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
29$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE 33$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
30 $(call if_changed,lzma) 34 $(call if_changed,lzma)
31 35
36# The mkimage tool wants 64bytes prepended to the image
37quiet_cmd_mk_bin_xip = BIN $@
38 cmd_mk_bin_xip = ( printf '%64s' | tr ' ' '\377' ; cat $< ) > $@
39$(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
40 $(call if_changed,mk_bin_xip)
41
32$(obj)/vmImage.bin: $(obj)/vmlinux.bin 42$(obj)/vmImage.bin: $(obj)/vmlinux.bin
33 $(call if_changed,uimage,none) 43 $(call if_changed,uimage,none)
34 44
@@ -41,10 +51,15 @@ $(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
41$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma 51$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
42 $(call if_changed,uimage,lzma) 52 $(call if_changed,uimage,lzma)
43 53
54$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
55 $(call if_changed,uimage,none)
56
44suffix-y := bin 57suffix-y := bin
45suffix-$(CONFIG_KERNEL_GZIP) := gz 58suffix-$(CONFIG_KERNEL_GZIP) := gz
46suffix-$(CONFIG_KERNEL_BZIP2) := bz2 59suffix-$(CONFIG_KERNEL_BZIP2) := bz2
47suffix-$(CONFIG_KERNEL_LZMA) := lzma 60suffix-$(CONFIG_KERNEL_LZMA) := lzma
61suffix-$(CONFIG_ROMKERNEL) := xip
62
48$(obj)/vmImage: $(obj)/vmImage.$(suffix-y) 63$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
49 @ln -sf $(notdir $<) $@ 64 @ln -sf $(notdir $<) $@
50 65
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index e31559419817..cf7c9bc94f13 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_IOSCHED_NOOP=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102CONFIG_DEFAULT_NOOP=y 131CONFIG_DEFAULT_NOOP=y
103CONFIG_DEFAULT_IOSCHED="noop" 132CONFIG_DEFAULT_IOSCHED="noop"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_BF518=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=0 174CONFIG_BF_REV_MIN=0
@@ -154,8 +182,8 @@ CONFIG_BF_REV_0_0=y
154# CONFIG_BF_REV_0_6 is not set 182# CONFIG_BF_REV_0_6 is not set
155# CONFIG_BF_REV_ANY is not set 183# CONFIG_BF_REV_ANY is not set
156# CONFIG_BF_REV_NONE is not set 184# CONFIG_BF_REV_NONE is not set
157CONFIG_BF51x=y
158CONFIG_MEM_MT48LC32M8A2_75=y 185CONFIG_MEM_MT48LC32M8A2_75=y
186CONFIG_BF51x=y
159CONFIG_BFIN518F_EZBRD=y 187CONFIG_BFIN518F_EZBRD=y
160 188
161# 189#
@@ -313,7 +341,6 @@ CONFIG_FLATMEM=y
313CONFIG_FLAT_NODE_MEM_MAP=y 341CONFIG_FLAT_NODE_MEM_MAP=y
314CONFIG_PAGEFLAGS_EXTENDED=y 342CONFIG_PAGEFLAGS_EXTENDED=y
315CONFIG_SPLIT_PTLOCK_CPUS=4 343CONFIG_SPLIT_PTLOCK_CPUS=4
316# CONFIG_RESOURCES_64BIT is not set
317# CONFIG_PHYS_ADDR_T_64BIT is not set 344# CONFIG_PHYS_ADDR_T_64BIT is not set
318CONFIG_ZONE_DMA_FLAG=1 345CONFIG_ZONE_DMA_FLAG=1
319CONFIG_VIRT_TO_BUS=y 346CONFIG_VIRT_TO_BUS=y
@@ -322,16 +349,18 @@ CONFIG_BFIN_GPTIMERS=m
322# CONFIG_DMA_UNCACHED_4M is not set 349# CONFIG_DMA_UNCACHED_4M is not set
323# CONFIG_DMA_UNCACHED_2M is not set 350# CONFIG_DMA_UNCACHED_2M is not set
324CONFIG_DMA_UNCACHED_1M=y 351CONFIG_DMA_UNCACHED_1M=y
352# CONFIG_DMA_UNCACHED_512K is not set
353# CONFIG_DMA_UNCACHED_256K is not set
354# CONFIG_DMA_UNCACHED_128K is not set
325# CONFIG_DMA_UNCACHED_NONE is not set 355# CONFIG_DMA_UNCACHED_NONE is not set
326 356
327# 357#
328# Cache Support 358# Cache Support
329# 359#
330CONFIG_BFIN_ICACHE=y 360CONFIG_BFIN_ICACHE=y
331# CONFIG_BFIN_ICACHE_LOCK is not set 361CONFIG_BFIN_EXTMEM_ICACHEABLE=y
332CONFIG_BFIN_DCACHE=y 362CONFIG_BFIN_DCACHE=y
333# CONFIG_BFIN_DCACHE_BANKA is not set 363# CONFIG_BFIN_DCACHE_BANKA is not set
334CONFIG_BFIN_EXTMEM_ICACHEABLE=y
335CONFIG_BFIN_EXTMEM_DCACHEABLE=y 364CONFIG_BFIN_EXTMEM_DCACHEABLE=y
336CONFIG_BFIN_EXTMEM_WRITEBACK=y 365CONFIG_BFIN_EXTMEM_WRITEBACK=y
337# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 366# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -342,7 +371,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
342# CONFIG_MPU is not set 371# CONFIG_MPU is not set
343 372
344# 373#
345# Asynchonous Memory Configuration 374# Asynchronous Memory Configuration
346# 375#
347 376
348# 377#
@@ -398,11 +427,6 @@ CONFIG_NET=y
398CONFIG_PACKET=y 427CONFIG_PACKET=y
399# CONFIG_PACKET_MMAP is not set 428# CONFIG_PACKET_MMAP is not set
400CONFIG_UNIX=y 429CONFIG_UNIX=y
401CONFIG_XFRM=y
402# CONFIG_XFRM_USER is not set
403# CONFIG_XFRM_SUB_POLICY is not set
404# CONFIG_XFRM_MIGRATE is not set
405# CONFIG_XFRM_STATISTICS is not set
406# CONFIG_NET_KEY is not set 430# CONFIG_NET_KEY is not set
407CONFIG_INET=y 431CONFIG_INET=y
408# CONFIG_IP_MULTICAST is not set 432# CONFIG_IP_MULTICAST is not set
@@ -426,7 +450,6 @@ CONFIG_IP_PNP=y
426# CONFIG_INET_XFRM_MODE_BEET is not set 450# CONFIG_INET_XFRM_MODE_BEET is not set
427# CONFIG_INET_LRO is not set 451# CONFIG_INET_LRO is not set
428# CONFIG_INET_DIAG is not set 452# CONFIG_INET_DIAG is not set
429CONFIG_INET_TCP_DIAG=y
430# CONFIG_TCP_CONG_ADVANCED is not set 453# CONFIG_TCP_CONG_ADVANCED is not set
431CONFIG_TCP_CONG_CUBIC=y 454CONFIG_TCP_CONG_CUBIC=y
432CONFIG_DEFAULT_TCP_CONG="cubic" 455CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -437,6 +460,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
437# CONFIG_NETFILTER is not set 460# CONFIG_NETFILTER is not set
438# CONFIG_IP_DCCP is not set 461# CONFIG_IP_DCCP is not set
439# CONFIG_IP_SCTP is not set 462# CONFIG_IP_SCTP is not set
463# CONFIG_RDS is not set
440# CONFIG_TIPC is not set 464# CONFIG_TIPC is not set
441# CONFIG_ATM is not set 465# CONFIG_ATM is not set
442# CONFIG_BRIDGE is not set 466# CONFIG_BRIDGE is not set
@@ -450,7 +474,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
450# CONFIG_LAPB is not set 474# CONFIG_LAPB is not set
451# CONFIG_ECONET is not set 475# CONFIG_ECONET is not set
452# CONFIG_WAN_ROUTER is not set 476# CONFIG_WAN_ROUTER is not set
477# CONFIG_PHONET is not set
478# CONFIG_IEEE802154 is not set
453# CONFIG_NET_SCHED is not set 479# CONFIG_NET_SCHED is not set
480# CONFIG_DCB is not set
454 481
455# 482#
456# Network testing 483# Network testing
@@ -461,13 +488,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
461# CONFIG_IRDA is not set 488# CONFIG_IRDA is not set
462# CONFIG_BT is not set 489# CONFIG_BT is not set
463# CONFIG_AF_RXRPC is not set 490# CONFIG_AF_RXRPC is not set
464# CONFIG_PHONET is not set 491# CONFIG_WIRELESS is not set
465CONFIG_WIRELESS=y 492# CONFIG_WIMAX is not set
466# CONFIG_CFG80211 is not set
467CONFIG_WIRELESS_OLD_REGULATORY=y
468# CONFIG_WIRELESS_EXT is not set
469# CONFIG_MAC80211 is not set
470# CONFIG_IEEE80211 is not set
471# CONFIG_RFKILL is not set 493# CONFIG_RFKILL is not set
472# CONFIG_NET_9P is not set 494# CONFIG_NET_9P is not set
473 495
@@ -488,6 +510,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
488# CONFIG_CONNECTOR is not set 510# CONFIG_CONNECTOR is not set
489CONFIG_MTD=y 511CONFIG_MTD=y
490# CONFIG_MTD_DEBUG is not set 512# CONFIG_MTD_DEBUG is not set
513# CONFIG_MTD_TESTS is not set
491# CONFIG_MTD_CONCAT is not set 514# CONFIG_MTD_CONCAT is not set
492CONFIG_MTD_PARTITIONS=y 515CONFIG_MTD_PARTITIONS=y
493# CONFIG_MTD_REDBOOT_PARTS is not set 516# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -545,6 +568,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
545# 568#
546# CONFIG_MTD_DATAFLASH is not set 569# CONFIG_MTD_DATAFLASH is not set
547# CONFIG_MTD_M25P80 is not set 570# CONFIG_MTD_M25P80 is not set
571# CONFIG_MTD_SST25L is not set
548# CONFIG_MTD_SLRAM is not set 572# CONFIG_MTD_SLRAM is not set
549# CONFIG_MTD_PHRAM is not set 573# CONFIG_MTD_PHRAM is not set
550# CONFIG_MTD_MTDRAM is not set 574# CONFIG_MTD_MTDRAM is not set
@@ -560,6 +584,11 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
560# CONFIG_MTD_ONENAND is not set 584# CONFIG_MTD_ONENAND is not set
561 585
562# 586#
587# LPDDR flash memory drivers
588#
589# CONFIG_MTD_LPDDR is not set
590
591#
563# UBI - Unsorted block images 592# UBI - Unsorted block images
564# 593#
565# CONFIG_MTD_UBI is not set 594# CONFIG_MTD_UBI is not set
@@ -576,10 +605,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
576# CONFIG_ATA_OVER_ETH is not set 605# CONFIG_ATA_OVER_ETH is not set
577# CONFIG_BLK_DEV_HD is not set 606# CONFIG_BLK_DEV_HD is not set
578CONFIG_MISC_DEVICES=y 607CONFIG_MISC_DEVICES=y
579# CONFIG_EEPROM_93CX6 is not set 608# CONFIG_AD525X_DPOT is not set
580# CONFIG_ICS932S401 is not set 609# CONFIG_ICS932S401 is not set
581# CONFIG_ENCLOSURE_SERVICES is not set 610# CONFIG_ENCLOSURE_SERVICES is not set
611# CONFIG_ISL29003 is not set
582# CONFIG_C2PORT is not set 612# CONFIG_C2PORT is not set
613
614#
615# EEPROM support
616#
617# CONFIG_EEPROM_AT24 is not set
618# CONFIG_EEPROM_AT25 is not set
619# CONFIG_EEPROM_LEGACY is not set
620# CONFIG_EEPROM_MAX6875 is not set
621# CONFIG_EEPROM_93CX6 is not set
583CONFIG_HAVE_IDE=y 622CONFIG_HAVE_IDE=y
584# CONFIG_IDE is not set 623# CONFIG_IDE is not set
585 624
@@ -614,6 +653,9 @@ CONFIG_PHYLIB=y
614# CONFIG_BROADCOM_PHY is not set 653# CONFIG_BROADCOM_PHY is not set
615# CONFIG_ICPLUS_PHY is not set 654# CONFIG_ICPLUS_PHY is not set
616# CONFIG_REALTEK_PHY is not set 655# CONFIG_REALTEK_PHY is not set
656# CONFIG_NATIONAL_PHY is not set
657# CONFIG_STE10XP is not set
658# CONFIG_LSI_ET1011C_PHY is not set
617# CONFIG_FIXED_PHY is not set 659# CONFIG_FIXED_PHY is not set
618# CONFIG_MDIO_BITBANG is not set 660# CONFIG_MDIO_BITBANG is not set
619CONFIG_NET_ETHERNET=y 661CONFIG_NET_ETHERNET=y
@@ -622,10 +664,14 @@ CONFIG_BFIN_MAC=y
622CONFIG_BFIN_TX_DESC_NUM=10 664CONFIG_BFIN_TX_DESC_NUM=10
623CONFIG_BFIN_RX_DESC_NUM=20 665CONFIG_BFIN_RX_DESC_NUM=20
624# CONFIG_BFIN_MAC_RMII is not set 666# CONFIG_BFIN_MAC_RMII is not set
667CONFIG_BFIN_MAC_USE_HWSTAMP=y
625# CONFIG_SMC91X is not set 668# CONFIG_SMC91X is not set
626# CONFIG_SMSC911X is not set
627# CONFIG_DM9000 is not set 669# CONFIG_DM9000 is not set
628# CONFIG_ENC28J60 is not set 670# CONFIG_ENC28J60 is not set
671# CONFIG_ETHOC is not set
672# CONFIG_SMSC911X is not set
673# CONFIG_DNET is not set
674# CONFIG_ADF702X is not set
629# CONFIG_IBM_NEW_EMAC_ZMII is not set 675# CONFIG_IBM_NEW_EMAC_ZMII is not set
630# CONFIG_IBM_NEW_EMAC_RGMII is not set 676# CONFIG_IBM_NEW_EMAC_RGMII is not set
631# CONFIG_IBM_NEW_EMAC_TAH is not set 677# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -634,15 +680,16 @@ CONFIG_BFIN_RX_DESC_NUM=20
634# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 680# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
635# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 681# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
636# CONFIG_B44 is not set 682# CONFIG_B44 is not set
683# CONFIG_KS8842 is not set
684# CONFIG_KS8851 is not set
685# CONFIG_KS8851_MLL is not set
637# CONFIG_NETDEV_1000 is not set 686# CONFIG_NETDEV_1000 is not set
638# CONFIG_NETDEV_10000 is not set 687# CONFIG_NETDEV_10000 is not set
688# CONFIG_WLAN is not set
639 689
640# 690#
641# Wireless LAN 691# Enable WiMAX (Networking options) to see the WiMAX drivers
642# 692#
643# CONFIG_WLAN_PRE80211 is not set
644# CONFIG_WLAN_80211 is not set
645# CONFIG_IWLWIFI_LEDS is not set
646# CONFIG_WAN is not set 693# CONFIG_WAN is not set
647# CONFIG_PPP is not set 694# CONFIG_PPP is not set
648# CONFIG_SLIP is not set 695# CONFIG_SLIP is not set
@@ -677,7 +724,10 @@ CONFIG_INPUT=y
677# CONFIG_INPUT_TOUCHSCREEN is not set 724# CONFIG_INPUT_TOUCHSCREEN is not set
678CONFIG_INPUT_MISC=y 725CONFIG_INPUT_MISC=y
679# CONFIG_INPUT_UINPUT is not set 726# CONFIG_INPUT_UINPUT is not set
680# CONFIG_CONFIG_INPUT_PCF8574 is not set 727# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
728# CONFIG_INPUT_AD714X is not set
729# CONFIG_INPUT_ADXL34X is not set
730# CONFIG_INPUT_PCF8574 is not set
681 731
682# 732#
683# Hardware I/O ports 733# Hardware I/O ports
@@ -688,16 +738,13 @@ CONFIG_INPUT_MISC=y
688# 738#
689# Character devices 739# Character devices
690# 740#
691# CONFIG_AD9960 is not set
692CONFIG_BFIN_DMA_INTERFACE=m 741CONFIG_BFIN_DMA_INTERFACE=m
693# CONFIG_BFIN_PPI is not set 742# CONFIG_BFIN_PPI is not set
694# CONFIG_BFIN_PPIFCD is not set 743# CONFIG_BFIN_PPIFCD is not set
695# CONFIG_BFIN_SIMPLE_TIMER is not set 744# CONFIG_BFIN_SIMPLE_TIMER is not set
696# CONFIG_BFIN_SPI_ADC is not set 745# CONFIG_BFIN_SPI_ADC is not set
697# CONFIG_BFIN_SPORT is not set 746# CONFIG_BFIN_SPORT is not set
698# CONFIG_BFIN_TIMER_LATENCY is not set
699# CONFIG_BFIN_TWI_LCD is not set 747# CONFIG_BFIN_TWI_LCD is not set
700CONFIG_SIMPLE_GPIO=m
701CONFIG_VT=y 748CONFIG_VT=y
702CONFIG_CONSOLE_TRANSLATIONS=y 749CONFIG_CONSOLE_TRANSLATIONS=y
703CONFIG_VT_CONSOLE=y 750CONFIG_VT_CONSOLE=y
@@ -715,6 +762,7 @@ CONFIG_BFIN_JTAG_COMM=m
715# 762#
716# Non-8250 serial port support 763# Non-8250 serial port support
717# 764#
765# CONFIG_SERIAL_MAX3100 is not set
718CONFIG_SERIAL_BFIN=y 766CONFIG_SERIAL_BFIN=y
719CONFIG_SERIAL_BFIN_CONSOLE=y 767CONFIG_SERIAL_BFIN_CONSOLE=y
720CONFIG_SERIAL_BFIN_DMA=y 768CONFIG_SERIAL_BFIN_DMA=y
@@ -726,12 +774,10 @@ CONFIG_SERIAL_CORE=y
726CONFIG_SERIAL_CORE_CONSOLE=y 774CONFIG_SERIAL_CORE_CONSOLE=y
727# CONFIG_SERIAL_BFIN_SPORT is not set 775# CONFIG_SERIAL_BFIN_SPORT is not set
728CONFIG_UNIX98_PTYS=y 776CONFIG_UNIX98_PTYS=y
777# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
729# CONFIG_LEGACY_PTYS is not set 778# CONFIG_LEGACY_PTYS is not set
730 779CONFIG_BFIN_OTP=y
731# 780# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
732# CAN, the car bus and industrial fieldbus
733#
734# CONFIG_CAN4LINUX is not set
735# CONFIG_IPMI_HANDLER is not set 781# CONFIG_IPMI_HANDLER is not set
736# CONFIG_HW_RANDOM is not set 782# CONFIG_HW_RANDOM is not set
737# CONFIG_R3964 is not set 783# CONFIG_R3964 is not set
@@ -739,6 +785,7 @@ CONFIG_UNIX98_PTYS=y
739# CONFIG_TCG_TPM is not set 785# CONFIG_TCG_TPM is not set
740CONFIG_I2C=y 786CONFIG_I2C=y
741CONFIG_I2C_BOARDINFO=y 787CONFIG_I2C_BOARDINFO=y
788CONFIG_I2C_COMPAT=y
742CONFIG_I2C_CHARDEV=y 789CONFIG_I2C_CHARDEV=y
743CONFIG_I2C_HELPER_AUTO=y 790CONFIG_I2C_HELPER_AUTO=y
744 791
@@ -771,14 +818,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
771# Miscellaneous I2C Chip support 818# Miscellaneous I2C Chip support
772# 819#
773# CONFIG_DS1682 is not set 820# CONFIG_DS1682 is not set
774# CONFIG_EEPROM_AT24 is not set
775# CONFIG_SENSORS_AD5252 is not set
776# CONFIG_EEPROM_LEGACY is not set
777# CONFIG_SENSORS_PCF8574 is not set
778# CONFIG_PCF8575 is not set
779# CONFIG_SENSORS_PCA9539 is not set
780# CONFIG_SENSORS_PCF8591 is not set
781# CONFIG_SENSORS_MAX6875 is not set
782# CONFIG_SENSORS_TSL2550 is not set 821# CONFIG_SENSORS_TSL2550 is not set
783# CONFIG_I2C_DEBUG_CORE is not set 822# CONFIG_I2C_DEBUG_CORE is not set
784# CONFIG_I2C_DEBUG_ALGO is not set 823# CONFIG_I2C_DEBUG_ALGO is not set
@@ -795,13 +834,18 @@ CONFIG_SPI_BFIN=y
795# CONFIG_SPI_BFIN_LOCK is not set 834# CONFIG_SPI_BFIN_LOCK is not set
796# CONFIG_SPI_BFIN_SPORT is not set 835# CONFIG_SPI_BFIN_SPORT is not set
797# CONFIG_SPI_BITBANG is not set 836# CONFIG_SPI_BITBANG is not set
837# CONFIG_SPI_GPIO is not set
798 838
799# 839#
800# SPI Protocol Masters 840# SPI Protocol Masters
801# 841#
802# CONFIG_SPI_AT25 is not set
803# CONFIG_SPI_SPIDEV is not set 842# CONFIG_SPI_SPIDEV is not set
804# CONFIG_SPI_TLE62X0 is not set 843# CONFIG_SPI_TLE62X0 is not set
844
845#
846# PPS support
847#
848# CONFIG_PPS is not set
805CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 849CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
806CONFIG_GPIOLIB=y 850CONFIG_GPIOLIB=y
807# CONFIG_DEBUG_GPIO is not set 851# CONFIG_DEBUG_GPIO is not set
@@ -817,6 +861,7 @@ CONFIG_GPIO_SYSFS=y
817# CONFIG_GPIO_MAX732X is not set 861# CONFIG_GPIO_MAX732X is not set
818# CONFIG_GPIO_PCA953X is not set 862# CONFIG_GPIO_PCA953X is not set
819# CONFIG_GPIO_PCF857X is not set 863# CONFIG_GPIO_PCF857X is not set
864# CONFIG_GPIO_ADP5588 is not set
820 865
821# 866#
822# PCI GPIO expanders: 867# PCI GPIO expanders:
@@ -827,11 +872,15 @@ CONFIG_GPIO_SYSFS=y
827# 872#
828# CONFIG_GPIO_MAX7301 is not set 873# CONFIG_GPIO_MAX7301 is not set
829# CONFIG_GPIO_MCP23S08 is not set 874# CONFIG_GPIO_MCP23S08 is not set
875# CONFIG_GPIO_MC33880 is not set
876
877#
878# AC97 GPIO expanders:
879#
830# CONFIG_W1 is not set 880# CONFIG_W1 is not set
831# CONFIG_POWER_SUPPLY is not set 881# CONFIG_POWER_SUPPLY is not set
832# CONFIG_HWMON is not set 882# CONFIG_HWMON is not set
833# CONFIG_THERMAL is not set 883# CONFIG_THERMAL is not set
834# CONFIG_THERMAL_HWMON is not set
835CONFIG_WATCHDOG=y 884CONFIG_WATCHDOG=y
836# CONFIG_WATCHDOG_NOWAYOUT is not set 885# CONFIG_WATCHDOG_NOWAYOUT is not set
837 886
@@ -853,28 +902,20 @@ CONFIG_SSB_POSSIBLE=y
853# CONFIG_MFD_CORE is not set 902# CONFIG_MFD_CORE is not set
854# CONFIG_MFD_SM501 is not set 903# CONFIG_MFD_SM501 is not set
855# CONFIG_HTC_PASIC3 is not set 904# CONFIG_HTC_PASIC3 is not set
905# CONFIG_TPS65010 is not set
906# CONFIG_TWL4030_CORE is not set
856# CONFIG_MFD_TMIO is not set 907# CONFIG_MFD_TMIO is not set
857# CONFIG_PMIC_DA903X is not set 908# CONFIG_PMIC_DA903X is not set
858# CONFIG_PMIC_ADP5520 is not set 909# CONFIG_PMIC_ADP5520 is not set
859# CONFIG_MFD_WM8400 is not set 910# CONFIG_MFD_WM8400 is not set
911# CONFIG_MFD_WM831X is not set
860# CONFIG_MFD_WM8350_I2C is not set 912# CONFIG_MFD_WM8350_I2C is not set
913# CONFIG_MFD_PCF50633 is not set
914# CONFIG_MFD_MC13783 is not set
915# CONFIG_AB3100_CORE is not set
916# CONFIG_EZX_PCAP is not set
861# CONFIG_REGULATOR is not set 917# CONFIG_REGULATOR is not set
862 918# CONFIG_MEDIA_SUPPORT is not set
863#
864# Multimedia devices
865#
866
867#
868# Multimedia core support
869#
870# CONFIG_VIDEO_DEV is not set
871# CONFIG_DVB_CORE is not set
872# CONFIG_VIDEO_MEDIA is not set
873
874#
875# Multimedia drivers
876#
877# CONFIG_DAB is not set
878 919
879# 920#
880# Graphics support 921# Graphics support
@@ -912,10 +953,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y
912# MMC/SD/SDIO Host Controller Drivers 953# MMC/SD/SDIO Host Controller Drivers
913# 954#
914# CONFIG_MMC_SDHCI is not set 955# CONFIG_MMC_SDHCI is not set
956# CONFIG_MMC_AT91 is not set
957# CONFIG_MMC_ATMELMCI is not set
958# CONFIG_MMC_SPI is not set
915CONFIG_SDH_BFIN=m 959CONFIG_SDH_BFIN=m
916CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y 960CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y
917# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
918# CONFIG_MMC_SPI is not set
919# CONFIG_MEMSTICK is not set 961# CONFIG_MEMSTICK is not set
920# CONFIG_NEW_LEDS is not set 962# CONFIG_NEW_LEDS is not set
921# CONFIG_ACCESSIBILITY is not set 963# CONFIG_ACCESSIBILITY is not set
@@ -950,6 +992,7 @@ CONFIG_RTC_INTF_DEV=y
950# CONFIG_RTC_DRV_S35390A is not set 992# CONFIG_RTC_DRV_S35390A is not set
951# CONFIG_RTC_DRV_FM3130 is not set 993# CONFIG_RTC_DRV_FM3130 is not set
952# CONFIG_RTC_DRV_RX8581 is not set 994# CONFIG_RTC_DRV_RX8581 is not set
995# CONFIG_RTC_DRV_RX8025 is not set
953 996
954# 997#
955# SPI RTC drivers 998# SPI RTC drivers
@@ -961,6 +1004,7 @@ CONFIG_RTC_INTF_DEV=y
961# CONFIG_RTC_DRV_R9701 is not set 1004# CONFIG_RTC_DRV_R9701 is not set
962# CONFIG_RTC_DRV_RS5C348 is not set 1005# CONFIG_RTC_DRV_RS5C348 is not set
963# CONFIG_RTC_DRV_DS3234 is not set 1006# CONFIG_RTC_DRV_DS3234 is not set
1007# CONFIG_RTC_DRV_PCF2123 is not set
964 1008
965# 1009#
966# Platform RTC drivers 1010# Platform RTC drivers
@@ -981,10 +1025,21 @@ CONFIG_RTC_INTF_DEV=y
981# 1025#
982CONFIG_RTC_DRV_BFIN=y 1026CONFIG_RTC_DRV_BFIN=y
983# CONFIG_DMADEVICES is not set 1027# CONFIG_DMADEVICES is not set
1028# CONFIG_AUXDISPLAY is not set
984# CONFIG_UIO is not set 1029# CONFIG_UIO is not set
1030
1031#
1032# TI VLYNQ
1033#
985# CONFIG_STAGING is not set 1034# CONFIG_STAGING is not set
986 1035
987# 1036#
1037# Firmware Drivers
1038#
1039# CONFIG_FIRMWARE_MEMMAP is not set
1040# CONFIG_SIGMA is not set
1041
1042#
988# File systems 1043# File systems
989# 1044#
990CONFIG_EXT2_FS=m 1045CONFIG_EXT2_FS=m
@@ -994,9 +1049,13 @@ CONFIG_EXT2_FS=m
994# CONFIG_REISERFS_FS is not set 1049# CONFIG_REISERFS_FS is not set
995# CONFIG_JFS_FS is not set 1050# CONFIG_JFS_FS is not set
996# CONFIG_FS_POSIX_ACL is not set 1051# CONFIG_FS_POSIX_ACL is not set
997CONFIG_FILE_LOCKING=y
998# CONFIG_XFS_FS is not set 1052# CONFIG_XFS_FS is not set
1053# CONFIG_GFS2_FS is not set
999# CONFIG_OCFS2_FS is not set 1054# CONFIG_OCFS2_FS is not set
1055# CONFIG_BTRFS_FS is not set
1056# CONFIG_NILFS2_FS is not set
1057CONFIG_FILE_LOCKING=y
1058CONFIG_FSNOTIFY=y
1000# CONFIG_DNOTIFY is not set 1059# CONFIG_DNOTIFY is not set
1001CONFIG_INOTIFY=y 1060CONFIG_INOTIFY=y
1002CONFIG_INOTIFY_USER=y 1061CONFIG_INOTIFY_USER=y
@@ -1006,6 +1065,11 @@ CONFIG_INOTIFY_USER=y
1006# CONFIG_FUSE_FS is not set 1065# CONFIG_FUSE_FS is not set
1007 1066
1008# 1067#
1068# Caches
1069#
1070# CONFIG_FSCACHE is not set
1071
1072#
1009# CD-ROM/DVD Filesystems 1073# CD-ROM/DVD Filesystems
1010# 1074#
1011# CONFIG_ISO9660_FS is not set 1075# CONFIG_ISO9660_FS is not set
@@ -1027,13 +1091,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1027CONFIG_PROC_FS=y 1091CONFIG_PROC_FS=y
1028CONFIG_PROC_SYSCTL=y 1092CONFIG_PROC_SYSCTL=y
1029CONFIG_SYSFS=y 1093CONFIG_SYSFS=y
1030# CONFIG_TMPFS is not set
1031# CONFIG_HUGETLB_PAGE is not set 1094# CONFIG_HUGETLB_PAGE is not set
1032# CONFIG_CONFIGFS_FS is not set 1095# CONFIG_CONFIGFS_FS is not set
1033 1096CONFIG_MISC_FILESYSTEMS=y
1034#
1035# Miscellaneous filesystems
1036#
1037# CONFIG_ADFS_FS is not set 1097# CONFIG_ADFS_FS is not set
1038# CONFIG_AFFS_FS is not set 1098# CONFIG_AFFS_FS is not set
1039# CONFIG_HFS_FS is not set 1099# CONFIG_HFS_FS is not set
@@ -1042,8 +1102,8 @@ CONFIG_SYSFS=y
1042# CONFIG_BFS_FS is not set 1102# CONFIG_BFS_FS is not set
1043# CONFIG_EFS_FS is not set 1103# CONFIG_EFS_FS is not set
1044# CONFIG_JFFS2_FS is not set 1104# CONFIG_JFFS2_FS is not set
1045# CONFIG_YAFFS_FS is not set
1046# CONFIG_CRAMFS is not set 1105# CONFIG_CRAMFS is not set
1106# CONFIG_SQUASHFS is not set
1047# CONFIG_VXFS_FS is not set 1107# CONFIG_VXFS_FS is not set
1048# CONFIG_MINIX_FS is not set 1108# CONFIG_MINIX_FS is not set
1049# CONFIG_OMFS_FS is not set 1109# CONFIG_OMFS_FS is not set
@@ -1062,7 +1122,6 @@ CONFIG_LOCKD=m
1062CONFIG_LOCKD_V4=y 1122CONFIG_LOCKD_V4=y
1063CONFIG_NFS_COMMON=y 1123CONFIG_NFS_COMMON=y
1064CONFIG_SUNRPC=m 1124CONFIG_SUNRPC=m
1065# CONFIG_SUNRPC_REGISTER_V4 is not set
1066# CONFIG_RPCSEC_GSS_KRB5 is not set 1125# CONFIG_RPCSEC_GSS_KRB5 is not set
1067# CONFIG_RPCSEC_GSS_SPKM3 is not set 1126# CONFIG_RPCSEC_GSS_SPKM3 is not set
1068CONFIG_SMB_FS=m 1127CONFIG_SMB_FS=m
@@ -1127,14 +1186,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1127CONFIG_ENABLE_MUST_CHECK=y 1186CONFIG_ENABLE_MUST_CHECK=y
1128CONFIG_FRAME_WARN=1024 1187CONFIG_FRAME_WARN=1024
1129# CONFIG_MAGIC_SYSRQ is not set 1188# CONFIG_MAGIC_SYSRQ is not set
1189# CONFIG_STRIP_ASM_SYMS is not set
1130# CONFIG_UNUSED_SYMBOLS is not set 1190# CONFIG_UNUSED_SYMBOLS is not set
1131CONFIG_DEBUG_FS=y 1191CONFIG_DEBUG_FS=y
1132# CONFIG_HEADERS_CHECK is not set 1192# CONFIG_HEADERS_CHECK is not set
1193CONFIG_DEBUG_SECTION_MISMATCH=y
1133CONFIG_DEBUG_KERNEL=y 1194CONFIG_DEBUG_KERNEL=y
1134CONFIG_DEBUG_SHIRQ=y 1195CONFIG_DEBUG_SHIRQ=y
1135CONFIG_DETECT_SOFTLOCKUP=y 1196CONFIG_DETECT_SOFTLOCKUP=y
1136# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1197# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1137CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1198CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1199CONFIG_DETECT_HUNG_TASK=y
1200# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1201CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1138CONFIG_SCHED_DEBUG=y 1202CONFIG_SCHED_DEBUG=y
1139# CONFIG_SCHEDSTATS is not set 1203# CONFIG_SCHEDSTATS is not set
1140# CONFIG_TIMER_STATS is not set 1204# CONFIG_TIMER_STATS is not set
@@ -1142,31 +1206,39 @@ CONFIG_SCHED_DEBUG=y
1142# CONFIG_DEBUG_SLAB is not set 1206# CONFIG_DEBUG_SLAB is not set
1143# CONFIG_DEBUG_SPINLOCK is not set 1207# CONFIG_DEBUG_SPINLOCK is not set
1144# CONFIG_DEBUG_MUTEXES is not set 1208# CONFIG_DEBUG_MUTEXES is not set
1209# CONFIG_DEBUG_LOCK_ALLOC is not set
1210# CONFIG_PROVE_LOCKING is not set
1211# CONFIG_LOCK_STAT is not set
1145# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1212# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1146# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1213# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1147# CONFIG_DEBUG_KOBJECT is not set 1214# CONFIG_DEBUG_KOBJECT is not set
1148CONFIG_DEBUG_BUGVERBOSE=y 1215CONFIG_DEBUG_BUGVERBOSE=y
1149CONFIG_DEBUG_INFO=y 1216CONFIG_DEBUG_INFO=y
1150# CONFIG_DEBUG_VM is not set 1217# CONFIG_DEBUG_VM is not set
1218# CONFIG_DEBUG_NOMMU_REGIONS is not set
1151# CONFIG_DEBUG_WRITECOUNT is not set 1219# CONFIG_DEBUG_WRITECOUNT is not set
1152# CONFIG_DEBUG_MEMORY_INIT is not set 1220# CONFIG_DEBUG_MEMORY_INIT is not set
1153# CONFIG_DEBUG_LIST is not set 1221# CONFIG_DEBUG_LIST is not set
1154# CONFIG_DEBUG_SG is not set 1222# CONFIG_DEBUG_SG is not set
1223# CONFIG_DEBUG_NOTIFIERS is not set
1224# CONFIG_DEBUG_CREDENTIALS is not set
1155# CONFIG_FRAME_POINTER is not set 1225# CONFIG_FRAME_POINTER is not set
1156# CONFIG_BOOT_PRINTK_DELAY is not set 1226# CONFIG_BOOT_PRINTK_DELAY is not set
1157# CONFIG_RCU_TORTURE_TEST is not set 1227# CONFIG_RCU_TORTURE_TEST is not set
1158# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1228# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1159# CONFIG_BACKTRACE_SELF_TEST is not set 1229# CONFIG_BACKTRACE_SELF_TEST is not set
1160# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1230# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1231# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1161# CONFIG_FAULT_INJECTION is not set 1232# CONFIG_FAULT_INJECTION is not set
1162 1233# CONFIG_PAGE_POISONING is not set
1163# 1234CONFIG_HAVE_FUNCTION_TRACER=y
1164# Tracers 1235CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1165# 1236CONFIG_TRACING_SUPPORT=y
1166# CONFIG_SCHED_TRACER is not set 1237# CONFIG_FTRACE is not set
1167# CONFIG_CONTEXT_SWITCH_TRACER is not set 1238# CONFIG_BRANCH_PROFILE_NONE is not set
1168# CONFIG_BOOT_TRACER is not set 1239# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1169# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1240# CONFIG_PROFILE_ALL_BRANCHES is not set
1241# CONFIG_DYNAMIC_DEBUG is not set
1170# CONFIG_SAMPLES is not set 1242# CONFIG_SAMPLES is not set
1171CONFIG_HAVE_ARCH_KGDB=y 1243CONFIG_HAVE_ARCH_KGDB=y
1172# CONFIG_KGDB is not set 1244# CONFIG_KGDB is not set
@@ -1191,6 +1263,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1191CONFIG_EARLY_PRINTK=y 1263CONFIG_EARLY_PRINTK=y
1192CONFIG_CPLB_INFO=y 1264CONFIG_CPLB_INFO=y
1193CONFIG_ACCESS_CHECK=y 1265CONFIG_ACCESS_CHECK=y
1266# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1194 1267
1195# 1268#
1196# Security options 1269# Security options
@@ -1199,14 +1272,14 @@ CONFIG_ACCESS_CHECK=y
1199CONFIG_SECURITY=y 1272CONFIG_SECURITY=y
1200# CONFIG_SECURITYFS is not set 1273# CONFIG_SECURITYFS is not set
1201# CONFIG_SECURITY_NETWORK is not set 1274# CONFIG_SECURITY_NETWORK is not set
1275# CONFIG_SECURITY_PATH is not set
1202# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1276# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1203CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1277# CONFIG_SECURITY_TOMOYO is not set
1204CONFIG_CRYPTO=y 1278CONFIG_CRYPTO=y
1205 1279
1206# 1280#
1207# Crypto core or helper 1281# Crypto core or helper
1208# 1282#
1209# CONFIG_CRYPTO_FIPS is not set
1210# CONFIG_CRYPTO_MANAGER is not set 1283# CONFIG_CRYPTO_MANAGER is not set
1211# CONFIG_CRYPTO_MANAGER2 is not set 1284# CONFIG_CRYPTO_MANAGER2 is not set
1212# CONFIG_CRYPTO_GF128MUL is not set 1285# CONFIG_CRYPTO_GF128MUL is not set
@@ -1238,11 +1311,13 @@ CONFIG_CRYPTO=y
1238# 1311#
1239# CONFIG_CRYPTO_HMAC is not set 1312# CONFIG_CRYPTO_HMAC is not set
1240# CONFIG_CRYPTO_XCBC is not set 1313# CONFIG_CRYPTO_XCBC is not set
1314# CONFIG_CRYPTO_VMAC is not set
1241 1315
1242# 1316#
1243# Digest 1317# Digest
1244# 1318#
1245# CONFIG_CRYPTO_CRC32C is not set 1319# CONFIG_CRYPTO_CRC32C is not set
1320# CONFIG_CRYPTO_GHASH is not set
1246# CONFIG_CRYPTO_MD4 is not set 1321# CONFIG_CRYPTO_MD4 is not set
1247# CONFIG_CRYPTO_MD5 is not set 1322# CONFIG_CRYPTO_MD5 is not set
1248# CONFIG_CRYPTO_MICHAEL_MIC is not set 1323# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1279,6 +1354,7 @@ CONFIG_CRYPTO=y
1279# Compression 1354# Compression
1280# 1355#
1281# CONFIG_CRYPTO_DEFLATE is not set 1356# CONFIG_CRYPTO_DEFLATE is not set
1357# CONFIG_CRYPTO_ZLIB is not set
1282# CONFIG_CRYPTO_LZO is not set 1358# CONFIG_CRYPTO_LZO is not set
1283 1359
1284# 1360#
@@ -1286,11 +1362,13 @@ CONFIG_CRYPTO=y
1286# 1362#
1287# CONFIG_CRYPTO_ANSI_CPRNG is not set 1363# CONFIG_CRYPTO_ANSI_CPRNG is not set
1288CONFIG_CRYPTO_HW=y 1364CONFIG_CRYPTO_HW=y
1365# CONFIG_BINARY_PRINTF is not set
1289 1366
1290# 1367#
1291# Library routines 1368# Library routines
1292# 1369#
1293CONFIG_BITREVERSE=y 1370CONFIG_BITREVERSE=y
1371CONFIG_GENERIC_FIND_LAST_BIT=y
1294CONFIG_CRC_CCITT=m 1372CONFIG_CRC_CCITT=m
1295# CONFIG_CRC16 is not set 1373# CONFIG_CRC16 is not set
1296# CONFIG_CRC_T10DIF is not set 1374# CONFIG_CRC_T10DIF is not set
@@ -1299,6 +1377,8 @@ CONFIG_CRC32=y
1299# CONFIG_CRC7 is not set 1377# CONFIG_CRC7 is not set
1300# CONFIG_LIBCRC32C is not set 1378# CONFIG_LIBCRC32C is not set
1301CONFIG_ZLIB_INFLATE=y 1379CONFIG_ZLIB_INFLATE=y
1380CONFIG_DECOMPRESS_GZIP=y
1302CONFIG_HAS_IOMEM=y 1381CONFIG_HAS_IOMEM=y
1303CONFIG_HAS_IOPORT=y 1382CONFIG_HAS_IOPORT=y
1304CONFIG_HAS_DMA=y 1383CONFIG_HAS_DMA=y
1384CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 075e0fdcb399..31c2a6db6ec5 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_IOSCHED_NOOP=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102CONFIG_DEFAULT_NOOP=y 131CONFIG_DEFAULT_NOOP=y
103CONFIG_DEFAULT_IOSCHED="noop" 132CONFIG_DEFAULT_IOSCHED="noop"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_BF526=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=0 174CONFIG_BF_REV_MIN=0
@@ -154,8 +182,7 @@ CONFIG_BF_REV_0_0=y
154# CONFIG_BF_REV_0_6 is not set 182# CONFIG_BF_REV_0_6 is not set
155# CONFIG_BF_REV_ANY is not set 183# CONFIG_BF_REV_ANY is not set
156# CONFIG_BF_REV_NONE is not set 184# CONFIG_BF_REV_NONE is not set
157CONFIG_BF52x=y 185CONFIG_MEM_MT48H32M16LFCJ_75=y
158CONFIG_MEM_MT48LC32M16A2TG_75=y
159CONFIG_IRQ_PLL_WAKEUP=7 186CONFIG_IRQ_PLL_WAKEUP=7
160CONFIG_IRQ_DMA0_ERROR=7 187CONFIG_IRQ_DMA0_ERROR=7
161CONFIG_IRQ_DMAR0_BLK=7 188CONFIG_IRQ_DMAR0_BLK=7
@@ -200,7 +227,9 @@ CONFIG_IRQ_MEM_DMA1=13
200CONFIG_IRQ_WATCH=13 227CONFIG_IRQ_WATCH=13
201CONFIG_IRQ_PORTF_INTA=13 228CONFIG_IRQ_PORTF_INTA=13
202CONFIG_IRQ_PORTF_INTB=13 229CONFIG_IRQ_PORTF_INTB=13
230CONFIG_BF52x=y
203# CONFIG_BFIN527_EZKIT is not set 231# CONFIG_BFIN527_EZKIT is not set
232# CONFIG_BFIN527_EZKIT_V2 is not set
204# CONFIG_BFIN527_BLUETECHNIX_CM is not set 233# CONFIG_BFIN527_BLUETECHNIX_CM is not set
205CONFIG_BFIN526_EZBRD=y 234CONFIG_BFIN526_EZBRD=y
206 235
@@ -318,7 +347,6 @@ CONFIG_FLATMEM=y
318CONFIG_FLAT_NODE_MEM_MAP=y 347CONFIG_FLAT_NODE_MEM_MAP=y
319CONFIG_PAGEFLAGS_EXTENDED=y 348CONFIG_PAGEFLAGS_EXTENDED=y
320CONFIG_SPLIT_PTLOCK_CPUS=4 349CONFIG_SPLIT_PTLOCK_CPUS=4
321# CONFIG_RESOURCES_64BIT is not set
322# CONFIG_PHYS_ADDR_T_64BIT is not set 350# CONFIG_PHYS_ADDR_T_64BIT is not set
323CONFIG_ZONE_DMA_FLAG=1 351CONFIG_ZONE_DMA_FLAG=1
324CONFIG_VIRT_TO_BUS=y 352CONFIG_VIRT_TO_BUS=y
@@ -327,16 +355,18 @@ CONFIG_BFIN_GPTIMERS=m
327# CONFIG_DMA_UNCACHED_4M is not set 355# CONFIG_DMA_UNCACHED_4M is not set
328# CONFIG_DMA_UNCACHED_2M is not set 356# CONFIG_DMA_UNCACHED_2M is not set
329CONFIG_DMA_UNCACHED_1M=y 357CONFIG_DMA_UNCACHED_1M=y
358# CONFIG_DMA_UNCACHED_512K is not set
359# CONFIG_DMA_UNCACHED_256K is not set
360# CONFIG_DMA_UNCACHED_128K is not set
330# CONFIG_DMA_UNCACHED_NONE is not set 361# CONFIG_DMA_UNCACHED_NONE is not set
331 362
332# 363#
333# Cache Support 364# Cache Support
334# 365#
335CONFIG_BFIN_ICACHE=y 366CONFIG_BFIN_ICACHE=y
336# CONFIG_BFIN_ICACHE_LOCK is not set 367CONFIG_BFIN_EXTMEM_ICACHEABLE=y
337CONFIG_BFIN_DCACHE=y 368CONFIG_BFIN_DCACHE=y
338# CONFIG_BFIN_DCACHE_BANKA is not set 369# CONFIG_BFIN_DCACHE_BANKA is not set
339CONFIG_BFIN_EXTMEM_ICACHEABLE=y
340CONFIG_BFIN_EXTMEM_DCACHEABLE=y 370CONFIG_BFIN_EXTMEM_DCACHEABLE=y
341CONFIG_BFIN_EXTMEM_WRITEBACK=y 371CONFIG_BFIN_EXTMEM_WRITEBACK=y
342# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 372# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -347,6 +377,10 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
347# CONFIG_MPU is not set 377# CONFIG_MPU is not set
348 378
349# 379#
380# Asynchronous Memory Configuration
381#
382
383#
350# EBIU_AMGCTL Global Control 384# EBIU_AMGCTL Global Control
351# 385#
352CONFIG_C_AMCKEN=y 386CONFIG_C_AMCKEN=y
@@ -399,11 +433,6 @@ CONFIG_NET=y
399CONFIG_PACKET=y 433CONFIG_PACKET=y
400# CONFIG_PACKET_MMAP is not set 434# CONFIG_PACKET_MMAP is not set
401CONFIG_UNIX=y 435CONFIG_UNIX=y
402CONFIG_XFRM=y
403# CONFIG_XFRM_USER is not set
404# CONFIG_XFRM_SUB_POLICY is not set
405# CONFIG_XFRM_MIGRATE is not set
406# CONFIG_XFRM_STATISTICS is not set
407# CONFIG_NET_KEY is not set 436# CONFIG_NET_KEY is not set
408CONFIG_INET=y 437CONFIG_INET=y
409# CONFIG_IP_MULTICAST is not set 438# CONFIG_IP_MULTICAST is not set
@@ -427,7 +456,6 @@ CONFIG_IP_PNP=y
427# CONFIG_INET_XFRM_MODE_BEET is not set 456# CONFIG_INET_XFRM_MODE_BEET is not set
428# CONFIG_INET_LRO is not set 457# CONFIG_INET_LRO is not set
429# CONFIG_INET_DIAG is not set 458# CONFIG_INET_DIAG is not set
430CONFIG_INET_TCP_DIAG=y
431# CONFIG_TCP_CONG_ADVANCED is not set 459# CONFIG_TCP_CONG_ADVANCED is not set
432CONFIG_TCP_CONG_CUBIC=y 460CONFIG_TCP_CONG_CUBIC=y
433CONFIG_DEFAULT_TCP_CONG="cubic" 461CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -438,6 +466,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
438# CONFIG_NETFILTER is not set 466# CONFIG_NETFILTER is not set
439# CONFIG_IP_DCCP is not set 467# CONFIG_IP_DCCP is not set
440# CONFIG_IP_SCTP is not set 468# CONFIG_IP_SCTP is not set
469# CONFIG_RDS is not set
441# CONFIG_TIPC is not set 470# CONFIG_TIPC is not set
442# CONFIG_ATM is not set 471# CONFIG_ATM is not set
443# CONFIG_BRIDGE is not set 472# CONFIG_BRIDGE is not set
@@ -451,7 +480,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
451# CONFIG_LAPB is not set 480# CONFIG_LAPB is not set
452# CONFIG_ECONET is not set 481# CONFIG_ECONET is not set
453# CONFIG_WAN_ROUTER is not set 482# CONFIG_WAN_ROUTER is not set
483# CONFIG_PHONET is not set
484# CONFIG_IEEE802154 is not set
454# CONFIG_NET_SCHED is not set 485# CONFIG_NET_SCHED is not set
486# CONFIG_DCB is not set
455 487
456# 488#
457# Network testing 489# Network testing
@@ -462,13 +494,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
462# CONFIG_IRDA is not set 494# CONFIG_IRDA is not set
463# CONFIG_BT is not set 495# CONFIG_BT is not set
464# CONFIG_AF_RXRPC is not set 496# CONFIG_AF_RXRPC is not set
465# CONFIG_PHONET is not set 497# CONFIG_WIRELESS is not set
466CONFIG_WIRELESS=y 498# CONFIG_WIMAX is not set
467# CONFIG_CFG80211 is not set
468CONFIG_WIRELESS_OLD_REGULATORY=y
469# CONFIG_WIRELESS_EXT is not set
470# CONFIG_MAC80211 is not set
471# CONFIG_IEEE80211 is not set
472# CONFIG_RFKILL is not set 499# CONFIG_RFKILL is not set
473# CONFIG_NET_9P is not set 500# CONFIG_NET_9P is not set
474 501
@@ -489,6 +516,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
489# CONFIG_CONNECTOR is not set 516# CONFIG_CONNECTOR is not set
490CONFIG_MTD=y 517CONFIG_MTD=y
491# CONFIG_MTD_DEBUG is not set 518# CONFIG_MTD_DEBUG is not set
519# CONFIG_MTD_TESTS is not set
492# CONFIG_MTD_CONCAT is not set 520# CONFIG_MTD_CONCAT is not set
493CONFIG_MTD_PARTITIONS=y 521CONFIG_MTD_PARTITIONS=y
494# CONFIG_MTD_REDBOOT_PARTS is not set 522# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -549,6 +577,7 @@ CONFIG_MTD_PHYSMAP=y
549# CONFIG_MTD_DATAFLASH is not set 577# CONFIG_MTD_DATAFLASH is not set
550CONFIG_MTD_M25P80=y 578CONFIG_MTD_M25P80=y
551CONFIG_M25PXX_USE_FAST_READ=y 579CONFIG_M25PXX_USE_FAST_READ=y
580# CONFIG_MTD_SST25L is not set
552# CONFIG_MTD_SLRAM is not set 581# CONFIG_MTD_SLRAM is not set
553# CONFIG_MTD_PHRAM is not set 582# CONFIG_MTD_PHRAM is not set
554# CONFIG_MTD_MTDRAM is not set 583# CONFIG_MTD_MTDRAM is not set
@@ -564,11 +593,6 @@ CONFIG_MTD_NAND=m
564# CONFIG_MTD_NAND_VERIFY_WRITE is not set 593# CONFIG_MTD_NAND_VERIFY_WRITE is not set
565# CONFIG_MTD_NAND_ECC_SMC is not set 594# CONFIG_MTD_NAND_ECC_SMC is not set
566# CONFIG_MTD_NAND_MUSEUM_IDS is not set 595# CONFIG_MTD_NAND_MUSEUM_IDS is not set
567CONFIG_MTD_NAND_BFIN=m
568CONFIG_BFIN_NAND_BASE=0x20212000
569CONFIG_BFIN_NAND_CLE=2
570CONFIG_BFIN_NAND_ALE=1
571CONFIG_BFIN_NAND_READY=3
572CONFIG_MTD_NAND_IDS=m 596CONFIG_MTD_NAND_IDS=m
573# CONFIG_MTD_NAND_BF5XX is not set 597# CONFIG_MTD_NAND_BF5XX is not set
574# CONFIG_MTD_NAND_DISKONCHIP is not set 598# CONFIG_MTD_NAND_DISKONCHIP is not set
@@ -578,6 +602,11 @@ CONFIG_MTD_NAND_IDS=m
578# CONFIG_MTD_ONENAND is not set 602# CONFIG_MTD_ONENAND is not set
579 603
580# 604#
605# LPDDR flash memory drivers
606#
607# CONFIG_MTD_LPDDR is not set
608
609#
581# UBI - Unsorted block images 610# UBI - Unsorted block images
582# 611#
583# CONFIG_MTD_UBI is not set 612# CONFIG_MTD_UBI is not set
@@ -595,10 +624,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
595# CONFIG_ATA_OVER_ETH is not set 624# CONFIG_ATA_OVER_ETH is not set
596# CONFIG_BLK_DEV_HD is not set 625# CONFIG_BLK_DEV_HD is not set
597CONFIG_MISC_DEVICES=y 626CONFIG_MISC_DEVICES=y
598# CONFIG_EEPROM_93CX6 is not set 627# CONFIG_AD525X_DPOT is not set
599# CONFIG_ICS932S401 is not set 628# CONFIG_ICS932S401 is not set
600# CONFIG_ENCLOSURE_SERVICES is not set 629# CONFIG_ENCLOSURE_SERVICES is not set
630# CONFIG_ISL29003 is not set
601# CONFIG_C2PORT is not set 631# CONFIG_C2PORT is not set
632
633#
634# EEPROM support
635#
636# CONFIG_EEPROM_AT24 is not set
637# CONFIG_EEPROM_AT25 is not set
638# CONFIG_EEPROM_LEGACY is not set
639# CONFIG_EEPROM_MAX6875 is not set
640# CONFIG_EEPROM_93CX6 is not set
602CONFIG_HAVE_IDE=y 641CONFIG_HAVE_IDE=y
603# CONFIG_IDE is not set 642# CONFIG_IDE is not set
604 643
@@ -622,10 +661,6 @@ CONFIG_BLK_DEV_SR=m
622# CONFIG_BLK_DEV_SR_VENDOR is not set 661# CONFIG_BLK_DEV_SR_VENDOR is not set
623# CONFIG_CHR_DEV_SG is not set 662# CONFIG_CHR_DEV_SG is not set
624# CONFIG_CHR_DEV_SCH is not set 663# CONFIG_CHR_DEV_SCH is not set
625
626#
627# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
628#
629# CONFIG_SCSI_MULTI_LUN is not set 664# CONFIG_SCSI_MULTI_LUN is not set
630# CONFIG_SCSI_CONSTANTS is not set 665# CONFIG_SCSI_CONSTANTS is not set
631# CONFIG_SCSI_LOGGING is not set 666# CONFIG_SCSI_LOGGING is not set
@@ -642,6 +677,7 @@ CONFIG_SCSI_WAIT_SCAN=m
642# CONFIG_SCSI_SRP_ATTRS is not set 677# CONFIG_SCSI_SRP_ATTRS is not set
643# CONFIG_SCSI_LOWLEVEL is not set 678# CONFIG_SCSI_LOWLEVEL is not set
644# CONFIG_SCSI_DH is not set 679# CONFIG_SCSI_DH is not set
680# CONFIG_SCSI_OSD_INITIATOR is not set
645# CONFIG_ATA is not set 681# CONFIG_ATA is not set
646# CONFIG_MD is not set 682# CONFIG_MD is not set
647CONFIG_NETDEVICES=y 683CONFIG_NETDEVICES=y
@@ -666,6 +702,9 @@ CONFIG_PHYLIB=y
666# CONFIG_BROADCOM_PHY is not set 702# CONFIG_BROADCOM_PHY is not set
667# CONFIG_ICPLUS_PHY is not set 703# CONFIG_ICPLUS_PHY is not set
668# CONFIG_REALTEK_PHY is not set 704# CONFIG_REALTEK_PHY is not set
705# CONFIG_NATIONAL_PHY is not set
706# CONFIG_STE10XP is not set
707# CONFIG_LSI_ET1011C_PHY is not set
669# CONFIG_FIXED_PHY is not set 708# CONFIG_FIXED_PHY is not set
670# CONFIG_MDIO_BITBANG is not set 709# CONFIG_MDIO_BITBANG is not set
671CONFIG_NET_ETHERNET=y 710CONFIG_NET_ETHERNET=y
@@ -675,9 +714,12 @@ CONFIG_BFIN_TX_DESC_NUM=10
675CONFIG_BFIN_RX_DESC_NUM=20 714CONFIG_BFIN_RX_DESC_NUM=20
676CONFIG_BFIN_MAC_RMII=y 715CONFIG_BFIN_MAC_RMII=y
677# CONFIG_SMC91X is not set 716# CONFIG_SMC91X is not set
678# CONFIG_SMSC911X is not set
679# CONFIG_DM9000 is not set 717# CONFIG_DM9000 is not set
680# CONFIG_ENC28J60 is not set 718# CONFIG_ENC28J60 is not set
719# CONFIG_ETHOC is not set
720# CONFIG_SMSC911X is not set
721# CONFIG_DNET is not set
722# CONFIG_ADF702X is not set
681# CONFIG_IBM_NEW_EMAC_ZMII is not set 723# CONFIG_IBM_NEW_EMAC_ZMII is not set
682# CONFIG_IBM_NEW_EMAC_RGMII is not set 724# CONFIG_IBM_NEW_EMAC_RGMII is not set
683# CONFIG_IBM_NEW_EMAC_TAH is not set 725# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -686,15 +728,16 @@ CONFIG_BFIN_MAC_RMII=y
686# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 728# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
687# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 729# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
688# CONFIG_B44 is not set 730# CONFIG_B44 is not set
731# CONFIG_KS8842 is not set
732# CONFIG_KS8851 is not set
733# CONFIG_KS8851_MLL is not set
689# CONFIG_NETDEV_1000 is not set 734# CONFIG_NETDEV_1000 is not set
690# CONFIG_NETDEV_10000 is not set 735# CONFIG_NETDEV_10000 is not set
736# CONFIG_WLAN is not set
691 737
692# 738#
693# Wireless LAN 739# Enable WiMAX (Networking options) to see the WiMAX drivers
694# 740#
695# CONFIG_WLAN_PRE80211 is not set
696# CONFIG_WLAN_80211 is not set
697# CONFIG_IWLWIFI_LEDS is not set
698 741
699# 742#
700# USB Network Adapters 743# USB Network Adapters
@@ -744,7 +787,11 @@ CONFIG_INPUT_MISC=y
744# CONFIG_INPUT_YEALINK is not set 787# CONFIG_INPUT_YEALINK is not set
745# CONFIG_INPUT_CM109 is not set 788# CONFIG_INPUT_CM109 is not set
746# CONFIG_INPUT_UINPUT is not set 789# CONFIG_INPUT_UINPUT is not set
747# CONFIG_CONFIG_INPUT_PCF8574 is not set 790# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
791# CONFIG_INPUT_BFIN_ROTARY is not set
792# CONFIG_INPUT_AD714X is not set
793# CONFIG_INPUT_ADXL34X is not set
794# CONFIG_INPUT_PCF8574 is not set
748 795
749# 796#
750# Hardware I/O ports 797# Hardware I/O ports
@@ -755,16 +802,13 @@ CONFIG_INPUT_MISC=y
755# 802#
756# Character devices 803# Character devices
757# 804#
758# CONFIG_AD9960 is not set
759CONFIG_BFIN_DMA_INTERFACE=m 805CONFIG_BFIN_DMA_INTERFACE=m
760# CONFIG_BFIN_PPI is not set 806# CONFIG_BFIN_PPI is not set
761# CONFIG_BFIN_PPIFCD is not set 807# CONFIG_BFIN_PPIFCD is not set
762# CONFIG_BFIN_SIMPLE_TIMER is not set 808# CONFIG_BFIN_SIMPLE_TIMER is not set
763# CONFIG_BFIN_SPI_ADC is not set 809# CONFIG_BFIN_SPI_ADC is not set
764# CONFIG_BFIN_SPORT is not set 810# CONFIG_BFIN_SPORT is not set
765# CONFIG_BFIN_TIMER_LATENCY is not set
766# CONFIG_BFIN_TWI_LCD is not set 811# CONFIG_BFIN_TWI_LCD is not set
767CONFIG_SIMPLE_GPIO=m
768CONFIG_VT=y 812CONFIG_VT=y
769CONFIG_CONSOLE_TRANSLATIONS=y 813CONFIG_CONSOLE_TRANSLATIONS=y
770CONFIG_VT_CONSOLE=y 814CONFIG_VT_CONSOLE=y
@@ -782,6 +826,7 @@ CONFIG_BFIN_JTAG_COMM=m
782# 826#
783# Non-8250 serial port support 827# Non-8250 serial port support
784# 828#
829# CONFIG_SERIAL_MAX3100 is not set
785CONFIG_SERIAL_BFIN=y 830CONFIG_SERIAL_BFIN=y
786CONFIG_SERIAL_BFIN_CONSOLE=y 831CONFIG_SERIAL_BFIN_CONSOLE=y
787CONFIG_SERIAL_BFIN_DMA=y 832CONFIG_SERIAL_BFIN_DMA=y
@@ -793,14 +838,10 @@ CONFIG_SERIAL_CORE=y
793CONFIG_SERIAL_CORE_CONSOLE=y 838CONFIG_SERIAL_CORE_CONSOLE=y
794# CONFIG_SERIAL_BFIN_SPORT is not set 839# CONFIG_SERIAL_BFIN_SPORT is not set
795CONFIG_UNIX98_PTYS=y 840CONFIG_UNIX98_PTYS=y
841# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
796# CONFIG_LEGACY_PTYS is not set 842# CONFIG_LEGACY_PTYS is not set
797CONFIG_BFIN_OTP=y 843CONFIG_BFIN_OTP=y
798# CONFIG_BFIN_OTP_WRITE_ENABLE is not set 844# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
799
800#
801# CAN, the car bus and industrial fieldbus
802#
803# CONFIG_CAN4LINUX is not set
804# CONFIG_IPMI_HANDLER is not set 845# CONFIG_IPMI_HANDLER is not set
805# CONFIG_HW_RANDOM is not set 846# CONFIG_HW_RANDOM is not set
806# CONFIG_R3964 is not set 847# CONFIG_R3964 is not set
@@ -808,6 +849,7 @@ CONFIG_BFIN_OTP=y
808# CONFIG_TCG_TPM is not set 849# CONFIG_TCG_TPM is not set
809CONFIG_I2C=y 850CONFIG_I2C=y
810CONFIG_I2C_BOARDINFO=y 851CONFIG_I2C_BOARDINFO=y
852CONFIG_I2C_COMPAT=y
811CONFIG_I2C_CHARDEV=m 853CONFIG_I2C_CHARDEV=m
812CONFIG_I2C_HELPER_AUTO=y 854CONFIG_I2C_HELPER_AUTO=y
813 855
@@ -841,14 +883,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
841# Miscellaneous I2C Chip support 883# Miscellaneous I2C Chip support
842# 884#
843# CONFIG_DS1682 is not set 885# CONFIG_DS1682 is not set
844# CONFIG_EEPROM_AT24 is not set
845# CONFIG_SENSORS_AD5252 is not set
846# CONFIG_EEPROM_LEGACY is not set
847# CONFIG_SENSORS_PCF8574 is not set
848# CONFIG_PCF8575 is not set
849# CONFIG_SENSORS_PCA9539 is not set
850# CONFIG_SENSORS_PCF8591 is not set
851# CONFIG_SENSORS_MAX6875 is not set
852# CONFIG_SENSORS_TSL2550 is not set 886# CONFIG_SENSORS_TSL2550 is not set
853# CONFIG_I2C_DEBUG_CORE is not set 887# CONFIG_I2C_DEBUG_CORE is not set
854# CONFIG_I2C_DEBUG_ALGO is not set 888# CONFIG_I2C_DEBUG_ALGO is not set
@@ -865,13 +899,18 @@ CONFIG_SPI_BFIN=y
865# CONFIG_SPI_BFIN_LOCK is not set 899# CONFIG_SPI_BFIN_LOCK is not set
866# CONFIG_SPI_BFIN_SPORT is not set 900# CONFIG_SPI_BFIN_SPORT is not set
867# CONFIG_SPI_BITBANG is not set 901# CONFIG_SPI_BITBANG is not set
902# CONFIG_SPI_GPIO is not set
868 903
869# 904#
870# SPI Protocol Masters 905# SPI Protocol Masters
871# 906#
872# CONFIG_EEPROM_AT25 is not set
873# CONFIG_SPI_SPIDEV is not set 907# CONFIG_SPI_SPIDEV is not set
874# CONFIG_SPI_TLE62X0 is not set 908# CONFIG_SPI_TLE62X0 is not set
909
910#
911# PPS support
912#
913# CONFIG_PPS is not set
875CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 914CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
876CONFIG_GPIOLIB=y 915CONFIG_GPIOLIB=y
877# CONFIG_DEBUG_GPIO is not set 916# CONFIG_DEBUG_GPIO is not set
@@ -887,6 +926,7 @@ CONFIG_GPIO_SYSFS=y
887# CONFIG_GPIO_MAX732X is not set 926# CONFIG_GPIO_MAX732X is not set
888# CONFIG_GPIO_PCA953X is not set 927# CONFIG_GPIO_PCA953X is not set
889# CONFIG_GPIO_PCF857X is not set 928# CONFIG_GPIO_PCF857X is not set
929# CONFIG_GPIO_ADP5588 is not set
890 930
891# 931#
892# PCI GPIO expanders: 932# PCI GPIO expanders:
@@ -897,11 +937,20 @@ CONFIG_GPIO_SYSFS=y
897# 937#
898# CONFIG_GPIO_MAX7301 is not set 938# CONFIG_GPIO_MAX7301 is not set
899# CONFIG_GPIO_MCP23S08 is not set 939# CONFIG_GPIO_MCP23S08 is not set
940# CONFIG_GPIO_MC33880 is not set
941
942#
943# AC97 GPIO expanders:
944#
900# CONFIG_W1 is not set 945# CONFIG_W1 is not set
901# CONFIG_POWER_SUPPLY is not set 946# CONFIG_POWER_SUPPLY is not set
902CONFIG_HWMON=y 947CONFIG_HWMON=y
903# CONFIG_HWMON_VID is not set 948# CONFIG_HWMON_VID is not set
904# CONFIG_SENSORS_AD5252 is not set 949# CONFIG_HWMON_DEBUG_CHIP is not set
950
951#
952# Native drivers
953#
905# CONFIG_SENSORS_AD7414 is not set 954# CONFIG_SENSORS_AD7414 is not set
906# CONFIG_SENSORS_AD7418 is not set 955# CONFIG_SENSORS_AD7418 is not set
907# CONFIG_SENSORS_ADCXX is not set 956# CONFIG_SENSORS_ADCXX is not set
@@ -914,11 +963,13 @@ CONFIG_HWMON=y
914# CONFIG_SENSORS_ADT7462 is not set 963# CONFIG_SENSORS_ADT7462 is not set
915# CONFIG_SENSORS_ADT7470 is not set 964# CONFIG_SENSORS_ADT7470 is not set
916# CONFIG_SENSORS_ADT7473 is not set 965# CONFIG_SENSORS_ADT7473 is not set
966# CONFIG_SENSORS_ADT7475 is not set
917# CONFIG_SENSORS_ATXP1 is not set 967# CONFIG_SENSORS_ATXP1 is not set
918# CONFIG_SENSORS_DS1621 is not set 968# CONFIG_SENSORS_DS1621 is not set
919# CONFIG_SENSORS_F71805F is not set 969# CONFIG_SENSORS_F71805F is not set
920# CONFIG_SENSORS_F71882FG is not set 970# CONFIG_SENSORS_F71882FG is not set
921# CONFIG_SENSORS_F75375S is not set 971# CONFIG_SENSORS_F75375S is not set
972# CONFIG_SENSORS_G760A is not set
922# CONFIG_SENSORS_GL518SM is not set 973# CONFIG_SENSORS_GL518SM is not set
923# CONFIG_SENSORS_GL520SM is not set 974# CONFIG_SENSORS_GL520SM is not set
924# CONFIG_SENSORS_IT87 is not set 975# CONFIG_SENSORS_IT87 is not set
@@ -934,17 +985,24 @@ CONFIG_HWMON=y
934# CONFIG_SENSORS_LM90 is not set 985# CONFIG_SENSORS_LM90 is not set
935# CONFIG_SENSORS_LM92 is not set 986# CONFIG_SENSORS_LM92 is not set
936# CONFIG_SENSORS_LM93 is not set 987# CONFIG_SENSORS_LM93 is not set
988# CONFIG_SENSORS_LTC4215 is not set
989# CONFIG_SENSORS_LTC4245 is not set
990# CONFIG_SENSORS_LM95241 is not set
937# CONFIG_SENSORS_MAX1111 is not set 991# CONFIG_SENSORS_MAX1111 is not set
938# CONFIG_SENSORS_MAX1619 is not set 992# CONFIG_SENSORS_MAX1619 is not set
939# CONFIG_SENSORS_MAX6650 is not set 993# CONFIG_SENSORS_MAX6650 is not set
940# CONFIG_SENSORS_PC87360 is not set 994# CONFIG_SENSORS_PC87360 is not set
941# CONFIG_SENSORS_PC87427 is not set 995# CONFIG_SENSORS_PC87427 is not set
996# CONFIG_SENSORS_PCF8591 is not set
997# CONFIG_SENSORS_SHT15 is not set
942# CONFIG_SENSORS_DME1737 is not set 998# CONFIG_SENSORS_DME1737 is not set
943# CONFIG_SENSORS_SMSC47M1 is not set 999# CONFIG_SENSORS_SMSC47M1 is not set
944# CONFIG_SENSORS_SMSC47M192 is not set 1000# CONFIG_SENSORS_SMSC47M192 is not set
945# CONFIG_SENSORS_SMSC47B397 is not set 1001# CONFIG_SENSORS_SMSC47B397 is not set
946# CONFIG_SENSORS_ADS7828 is not set 1002# CONFIG_SENSORS_ADS7828 is not set
947# CONFIG_SENSORS_THMC50 is not set 1003# CONFIG_SENSORS_THMC50 is not set
1004# CONFIG_SENSORS_TMP401 is not set
1005# CONFIG_SENSORS_TMP421 is not set
948# CONFIG_SENSORS_VT1211 is not set 1006# CONFIG_SENSORS_VT1211 is not set
949# CONFIG_SENSORS_W83781D is not set 1007# CONFIG_SENSORS_W83781D is not set
950# CONFIG_SENSORS_W83791D is not set 1008# CONFIG_SENSORS_W83791D is not set
@@ -954,9 +1012,8 @@ CONFIG_HWMON=y
954# CONFIG_SENSORS_W83L786NG is not set 1012# CONFIG_SENSORS_W83L786NG is not set
955# CONFIG_SENSORS_W83627HF is not set 1013# CONFIG_SENSORS_W83627HF is not set
956# CONFIG_SENSORS_W83627EHF is not set 1014# CONFIG_SENSORS_W83627EHF is not set
957# CONFIG_HWMON_DEBUG_CHIP is not set 1015# CONFIG_SENSORS_LIS3_SPI is not set
958# CONFIG_THERMAL is not set 1016# CONFIG_THERMAL is not set
959# CONFIG_THERMAL_HWMON is not set
960CONFIG_WATCHDOG=y 1017CONFIG_WATCHDOG=y
961# CONFIG_WATCHDOG_NOWAYOUT is not set 1018# CONFIG_WATCHDOG_NOWAYOUT is not set
962 1019
@@ -983,28 +1040,20 @@ CONFIG_SSB_POSSIBLE=y
983# CONFIG_MFD_CORE is not set 1040# CONFIG_MFD_CORE is not set
984# CONFIG_MFD_SM501 is not set 1041# CONFIG_MFD_SM501 is not set
985# CONFIG_HTC_PASIC3 is not set 1042# CONFIG_HTC_PASIC3 is not set
1043# CONFIG_TPS65010 is not set
1044# CONFIG_TWL4030_CORE is not set
986# CONFIG_MFD_TMIO is not set 1045# CONFIG_MFD_TMIO is not set
987# CONFIG_PMIC_DA903X is not set 1046# CONFIG_PMIC_DA903X is not set
988# CONFIG_PMIC_ADP5520 is not set 1047# CONFIG_PMIC_ADP5520 is not set
989# CONFIG_MFD_WM8400 is not set 1048# CONFIG_MFD_WM8400 is not set
1049# CONFIG_MFD_WM831X is not set
990# CONFIG_MFD_WM8350_I2C is not set 1050# CONFIG_MFD_WM8350_I2C is not set
1051# CONFIG_MFD_PCF50633 is not set
1052# CONFIG_MFD_MC13783 is not set
1053# CONFIG_AB3100_CORE is not set
1054# CONFIG_EZX_PCAP is not set
991# CONFIG_REGULATOR is not set 1055# CONFIG_REGULATOR is not set
992 1056# CONFIG_MEDIA_SUPPORT is not set
993#
994# Multimedia devices
995#
996
997#
998# Multimedia core support
999#
1000# CONFIG_VIDEO_DEV is not set
1001# CONFIG_DVB_CORE is not set
1002# CONFIG_VIDEO_MEDIA is not set
1003
1004#
1005# Multimedia drivers
1006#
1007# CONFIG_DAB is not set
1008 1057
1009# 1058#
1010# Graphics support 1059# Graphics support
@@ -1026,7 +1075,6 @@ CONFIG_DUMMY_CONSOLE=y
1026# CONFIG_SOUND is not set 1075# CONFIG_SOUND is not set
1027CONFIG_HID_SUPPORT=y 1076CONFIG_HID_SUPPORT=y
1028CONFIG_HID=y 1077CONFIG_HID=y
1029# CONFIG_HID_DEBUG is not set
1030# CONFIG_HIDRAW is not set 1078# CONFIG_HIDRAW is not set
1031 1079
1032# 1080#
@@ -1039,30 +1087,35 @@ CONFIG_USB_HID=y
1039# 1087#
1040# Special HID drivers 1088# Special HID drivers
1041# 1089#
1042CONFIG_HID_COMPAT=y
1043CONFIG_HID_A4TECH=y 1090CONFIG_HID_A4TECH=y
1044CONFIG_HID_APPLE=y 1091CONFIG_HID_APPLE=y
1045CONFIG_HID_BELKIN=y 1092CONFIG_HID_BELKIN=y
1046CONFIG_HID_BRIGHT=y
1047CONFIG_HID_CHERRY=y 1093CONFIG_HID_CHERRY=y
1048CONFIG_HID_CHICONY=y 1094CONFIG_HID_CHICONY=y
1049CONFIG_HID_CYPRESS=y 1095CONFIG_HID_CYPRESS=y
1050CONFIG_HID_DELL=y 1096# CONFIG_HID_DRAGONRISE is not set
1051CONFIG_HID_EZKEY=y 1097CONFIG_HID_EZKEY=y
1098# CONFIG_HID_KYE is not set
1052CONFIG_HID_GYRATION=y 1099CONFIG_HID_GYRATION=y
1100# CONFIG_HID_TWINHAN is not set
1101# CONFIG_HID_KENSINGTON is not set
1053CONFIG_HID_LOGITECH=y 1102CONFIG_HID_LOGITECH=y
1054# CONFIG_LOGITECH_FF is not set 1103# CONFIG_LOGITECH_FF is not set
1055# CONFIG_LOGIRUMBLEPAD2_FF is not set 1104# CONFIG_LOGIRUMBLEPAD2_FF is not set
1056CONFIG_HID_MICROSOFT=y 1105CONFIG_HID_MICROSOFT=y
1057CONFIG_HID_MONTEREY=y 1106CONFIG_HID_MONTEREY=y
1107# CONFIG_HID_NTRIG is not set
1058CONFIG_HID_PANTHERLORD=y 1108CONFIG_HID_PANTHERLORD=y
1059# CONFIG_PANTHERLORD_FF is not set 1109# CONFIG_PANTHERLORD_FF is not set
1060CONFIG_HID_PETALYNX=y 1110CONFIG_HID_PETALYNX=y
1061CONFIG_HID_SAMSUNG=y 1111CONFIG_HID_SAMSUNG=y
1062CONFIG_HID_SONY=y 1112CONFIG_HID_SONY=y
1063CONFIG_HID_SUNPLUS=y 1113CONFIG_HID_SUNPLUS=y
1064CONFIG_THRUSTMASTER_FF=m 1114# CONFIG_HID_GREENASIA is not set
1065CONFIG_ZEROPLUS_FF=m 1115# CONFIG_HID_SMARTJOYPLUS is not set
1116# CONFIG_HID_TOPSEED is not set
1117# CONFIG_HID_THRUSTMASTER is not set
1118# CONFIG_HID_ZEROPLUS is not set
1066CONFIG_USB_SUPPORT=y 1119CONFIG_USB_SUPPORT=y
1067CONFIG_USB_ARCH_HAS_HCD=y 1120CONFIG_USB_ARCH_HAS_HCD=y
1068# CONFIG_USB_ARCH_HAS_OHCI is not set 1121# CONFIG_USB_ARCH_HAS_OHCI is not set
@@ -1088,6 +1141,7 @@ CONFIG_USB_MON=y
1088# USB Host Controller Drivers 1141# USB Host Controller Drivers
1089# 1142#
1090# CONFIG_USB_C67X00_HCD is not set 1143# CONFIG_USB_C67X00_HCD is not set
1144# CONFIG_USB_OXU210HP_HCD is not set
1091# CONFIG_USB_ISP116X_HCD is not set 1145# CONFIG_USB_ISP116X_HCD is not set
1092# CONFIG_USB_ISP1760_HCD is not set 1146# CONFIG_USB_ISP1760_HCD is not set
1093# CONFIG_USB_ISP1362_HCD is not set 1147# CONFIG_USB_ISP1362_HCD is not set
@@ -1118,18 +1172,17 @@ CONFIG_USB_INVENTRA_DMA=y
1118# CONFIG_USB_TMC is not set 1172# CONFIG_USB_TMC is not set
1119 1173
1120# 1174#
1121# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; 1175# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
1122# 1176#
1123 1177
1124# 1178#
1125# see USB_STORAGE Help for more information 1179# also be needed; see USB_STORAGE Help for more info
1126# 1180#
1127CONFIG_USB_STORAGE=y 1181CONFIG_USB_STORAGE=y
1128# CONFIG_USB_STORAGE_DEBUG is not set 1182# CONFIG_USB_STORAGE_DEBUG is not set
1129# CONFIG_USB_STORAGE_DATAFAB is not set 1183# CONFIG_USB_STORAGE_DATAFAB is not set
1130# CONFIG_USB_STORAGE_FREECOM is not set 1184# CONFIG_USB_STORAGE_FREECOM is not set
1131# CONFIG_USB_STORAGE_ISD200 is not set 1185# CONFIG_USB_STORAGE_ISD200 is not set
1132# CONFIG_USB_STORAGE_DPCM is not set
1133# CONFIG_USB_STORAGE_USBAT is not set 1186# CONFIG_USB_STORAGE_USBAT is not set
1134# CONFIG_USB_STORAGE_SDDR09 is not set 1187# CONFIG_USB_STORAGE_SDDR09 is not set
1135# CONFIG_USB_STORAGE_SDDR55 is not set 1188# CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1165,7 +1218,6 @@ CONFIG_USB_STORAGE=y
1165# CONFIG_USB_LED is not set 1218# CONFIG_USB_LED is not set
1166# CONFIG_USB_CYPRESS_CY7C63 is not set 1219# CONFIG_USB_CYPRESS_CY7C63 is not set
1167# CONFIG_USB_CYTHERM is not set 1220# CONFIG_USB_CYTHERM is not set
1168# CONFIG_USB_PHIDGET is not set
1169# CONFIG_USB_IDMOUSE is not set 1221# CONFIG_USB_IDMOUSE is not set
1170# CONFIG_USB_FTDI_ELAN is not set 1222# CONFIG_USB_FTDI_ELAN is not set
1171# CONFIG_USB_APPLEDISPLAY is not set 1223# CONFIG_USB_APPLEDISPLAY is not set
@@ -1177,6 +1229,13 @@ CONFIG_USB_STORAGE=y
1177# CONFIG_USB_ISIGHTFW is not set 1229# CONFIG_USB_ISIGHTFW is not set
1178# CONFIG_USB_VST is not set 1230# CONFIG_USB_VST is not set
1179# CONFIG_USB_GADGET is not set 1231# CONFIG_USB_GADGET is not set
1232
1233#
1234# OTG and related infrastructure
1235#
1236CONFIG_USB_OTG_UTILS=y
1237# CONFIG_USB_GPIO_VBUS is not set
1238CONFIG_NOP_USB_XCEIV=y
1180# CONFIG_MMC is not set 1239# CONFIG_MMC is not set
1181# CONFIG_MEMSTICK is not set 1240# CONFIG_MEMSTICK is not set
1182# CONFIG_NEW_LEDS is not set 1241# CONFIG_NEW_LEDS is not set
@@ -1212,6 +1271,7 @@ CONFIG_RTC_INTF_DEV=y
1212# CONFIG_RTC_DRV_S35390A is not set 1271# CONFIG_RTC_DRV_S35390A is not set
1213# CONFIG_RTC_DRV_FM3130 is not set 1272# CONFIG_RTC_DRV_FM3130 is not set
1214# CONFIG_RTC_DRV_RX8581 is not set 1273# CONFIG_RTC_DRV_RX8581 is not set
1274# CONFIG_RTC_DRV_RX8025 is not set
1215 1275
1216# 1276#
1217# SPI RTC drivers 1277# SPI RTC drivers
@@ -1223,6 +1283,7 @@ CONFIG_RTC_INTF_DEV=y
1223# CONFIG_RTC_DRV_R9701 is not set 1283# CONFIG_RTC_DRV_R9701 is not set
1224# CONFIG_RTC_DRV_RS5C348 is not set 1284# CONFIG_RTC_DRV_RS5C348 is not set
1225# CONFIG_RTC_DRV_DS3234 is not set 1285# CONFIG_RTC_DRV_DS3234 is not set
1286# CONFIG_RTC_DRV_PCF2123 is not set
1226 1287
1227# 1288#
1228# Platform RTC drivers 1289# Platform RTC drivers
@@ -1243,10 +1304,21 @@ CONFIG_RTC_INTF_DEV=y
1243# 1304#
1244CONFIG_RTC_DRV_BFIN=y 1305CONFIG_RTC_DRV_BFIN=y
1245# CONFIG_DMADEVICES is not set 1306# CONFIG_DMADEVICES is not set
1307# CONFIG_AUXDISPLAY is not set
1246# CONFIG_UIO is not set 1308# CONFIG_UIO is not set
1309
1310#
1311# TI VLYNQ
1312#
1247# CONFIG_STAGING is not set 1313# CONFIG_STAGING is not set
1248 1314
1249# 1315#
1316# Firmware Drivers
1317#
1318# CONFIG_FIRMWARE_MEMMAP is not set
1319# CONFIG_SIGMA is not set
1320
1321#
1250# File systems 1322# File systems
1251# 1323#
1252CONFIG_EXT2_FS=m 1324CONFIG_EXT2_FS=m
@@ -1256,9 +1328,13 @@ CONFIG_EXT2_FS=m
1256# CONFIG_REISERFS_FS is not set 1328# CONFIG_REISERFS_FS is not set
1257# CONFIG_JFS_FS is not set 1329# CONFIG_JFS_FS is not set
1258# CONFIG_FS_POSIX_ACL is not set 1330# CONFIG_FS_POSIX_ACL is not set
1259CONFIG_FILE_LOCKING=y
1260# CONFIG_XFS_FS is not set 1331# CONFIG_XFS_FS is not set
1332# CONFIG_GFS2_FS is not set
1261# CONFIG_OCFS2_FS is not set 1333# CONFIG_OCFS2_FS is not set
1334# CONFIG_BTRFS_FS is not set
1335# CONFIG_NILFS2_FS is not set
1336CONFIG_FILE_LOCKING=y
1337CONFIG_FSNOTIFY=y
1262# CONFIG_DNOTIFY is not set 1338# CONFIG_DNOTIFY is not set
1263CONFIG_INOTIFY=y 1339CONFIG_INOTIFY=y
1264CONFIG_INOTIFY_USER=y 1340CONFIG_INOTIFY_USER=y
@@ -1268,6 +1344,11 @@ CONFIG_INOTIFY_USER=y
1268# CONFIG_FUSE_FS is not set 1344# CONFIG_FUSE_FS is not set
1269 1345
1270# 1346#
1347# Caches
1348#
1349# CONFIG_FSCACHE is not set
1350
1351#
1271# CD-ROM/DVD Filesystems 1352# CD-ROM/DVD Filesystems
1272# 1353#
1273CONFIG_ISO9660_FS=m 1354CONFIG_ISO9660_FS=m
@@ -1291,13 +1372,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1291CONFIG_PROC_FS=y 1372CONFIG_PROC_FS=y
1292CONFIG_PROC_SYSCTL=y 1373CONFIG_PROC_SYSCTL=y
1293CONFIG_SYSFS=y 1374CONFIG_SYSFS=y
1294# CONFIG_TMPFS is not set
1295# CONFIG_HUGETLB_PAGE is not set 1375# CONFIG_HUGETLB_PAGE is not set
1296# CONFIG_CONFIGFS_FS is not set 1376# CONFIG_CONFIGFS_FS is not set
1297 1377CONFIG_MISC_FILESYSTEMS=y
1298#
1299# Miscellaneous filesystems
1300#
1301# CONFIG_ADFS_FS is not set 1378# CONFIG_ADFS_FS is not set
1302# CONFIG_AFFS_FS is not set 1379# CONFIG_AFFS_FS is not set
1303# CONFIG_HFS_FS is not set 1380# CONFIG_HFS_FS is not set
@@ -1316,17 +1393,8 @@ CONFIG_JFFS2_ZLIB=y
1316# CONFIG_JFFS2_LZO is not set 1393# CONFIG_JFFS2_LZO is not set
1317CONFIG_JFFS2_RTIME=y 1394CONFIG_JFFS2_RTIME=y
1318# CONFIG_JFFS2_RUBIN is not set 1395# CONFIG_JFFS2_RUBIN is not set
1319CONFIG_YAFFS_FS=m
1320CONFIG_YAFFS_YAFFS1=y
1321# CONFIG_YAFFS_9BYTE_TAGS is not set
1322# CONFIG_YAFFS_DOES_ECC is not set
1323CONFIG_YAFFS_YAFFS2=y
1324CONFIG_YAFFS_AUTO_YAFFS2=y
1325# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1326# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1327# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1328CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1329# CONFIG_CRAMFS is not set 1396# CONFIG_CRAMFS is not set
1397# CONFIG_SQUASHFS is not set
1330# CONFIG_VXFS_FS is not set 1398# CONFIG_VXFS_FS is not set
1331# CONFIG_MINIX_FS is not set 1399# CONFIG_MINIX_FS is not set
1332# CONFIG_OMFS_FS is not set 1400# CONFIG_OMFS_FS is not set
@@ -1345,7 +1413,6 @@ CONFIG_LOCKD=m
1345CONFIG_LOCKD_V4=y 1413CONFIG_LOCKD_V4=y
1346CONFIG_NFS_COMMON=y 1414CONFIG_NFS_COMMON=y
1347CONFIG_SUNRPC=m 1415CONFIG_SUNRPC=m
1348# CONFIG_SUNRPC_REGISTER_V4 is not set
1349# CONFIG_RPCSEC_GSS_KRB5 is not set 1416# CONFIG_RPCSEC_GSS_KRB5 is not set
1350# CONFIG_RPCSEC_GSS_SPKM3 is not set 1417# CONFIG_RPCSEC_GSS_SPKM3 is not set
1351CONFIG_SMB_FS=m 1418CONFIG_SMB_FS=m
@@ -1360,7 +1427,7 @@ CONFIG_SMB_FS=m
1360# 1427#
1361# CONFIG_PARTITION_ADVANCED is not set 1428# CONFIG_PARTITION_ADVANCED is not set
1362CONFIG_MSDOS_PARTITION=y 1429CONFIG_MSDOS_PARTITION=y
1363CONFIG_NLS=m 1430CONFIG_NLS=y
1364CONFIG_NLS_DEFAULT="iso8859-1" 1431CONFIG_NLS_DEFAULT="iso8859-1"
1365CONFIG_NLS_CODEPAGE_437=m 1432CONFIG_NLS_CODEPAGE_437=m
1366# CONFIG_NLS_CODEPAGE_737 is not set 1433# CONFIG_NLS_CODEPAGE_737 is not set
@@ -1410,14 +1477,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1410CONFIG_ENABLE_MUST_CHECK=y 1477CONFIG_ENABLE_MUST_CHECK=y
1411CONFIG_FRAME_WARN=1024 1478CONFIG_FRAME_WARN=1024
1412# CONFIG_MAGIC_SYSRQ is not set 1479# CONFIG_MAGIC_SYSRQ is not set
1480# CONFIG_STRIP_ASM_SYMS is not set
1413# CONFIG_UNUSED_SYMBOLS is not set 1481# CONFIG_UNUSED_SYMBOLS is not set
1414CONFIG_DEBUG_FS=y 1482CONFIG_DEBUG_FS=y
1415# CONFIG_HEADERS_CHECK is not set 1483# CONFIG_HEADERS_CHECK is not set
1484CONFIG_DEBUG_SECTION_MISMATCH=y
1416CONFIG_DEBUG_KERNEL=y 1485CONFIG_DEBUG_KERNEL=y
1417CONFIG_DEBUG_SHIRQ=y 1486CONFIG_DEBUG_SHIRQ=y
1418CONFIG_DETECT_SOFTLOCKUP=y 1487CONFIG_DETECT_SOFTLOCKUP=y
1419# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1488# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1420CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1489CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1490CONFIG_DETECT_HUNG_TASK=y
1491# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1492CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1421CONFIG_SCHED_DEBUG=y 1493CONFIG_SCHED_DEBUG=y
1422# CONFIG_SCHEDSTATS is not set 1494# CONFIG_SCHEDSTATS is not set
1423# CONFIG_TIMER_STATS is not set 1495# CONFIG_TIMER_STATS is not set
@@ -1425,31 +1497,39 @@ CONFIG_SCHED_DEBUG=y
1425# CONFIG_DEBUG_SLAB is not set 1497# CONFIG_DEBUG_SLAB is not set
1426# CONFIG_DEBUG_SPINLOCK is not set 1498# CONFIG_DEBUG_SPINLOCK is not set
1427# CONFIG_DEBUG_MUTEXES is not set 1499# CONFIG_DEBUG_MUTEXES is not set
1500# CONFIG_DEBUG_LOCK_ALLOC is not set
1501# CONFIG_PROVE_LOCKING is not set
1502# CONFIG_LOCK_STAT is not set
1428# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1503# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1429# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1504# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1430# CONFIG_DEBUG_KOBJECT is not set 1505# CONFIG_DEBUG_KOBJECT is not set
1431CONFIG_DEBUG_BUGVERBOSE=y 1506CONFIG_DEBUG_BUGVERBOSE=y
1432CONFIG_DEBUG_INFO=y 1507CONFIG_DEBUG_INFO=y
1433# CONFIG_DEBUG_VM is not set 1508# CONFIG_DEBUG_VM is not set
1509# CONFIG_DEBUG_NOMMU_REGIONS is not set
1434# CONFIG_DEBUG_WRITECOUNT is not set 1510# CONFIG_DEBUG_WRITECOUNT is not set
1435# CONFIG_DEBUG_MEMORY_INIT is not set 1511# CONFIG_DEBUG_MEMORY_INIT is not set
1436# CONFIG_DEBUG_LIST is not set 1512# CONFIG_DEBUG_LIST is not set
1437# CONFIG_DEBUG_SG is not set 1513# CONFIG_DEBUG_SG is not set
1514# CONFIG_DEBUG_NOTIFIERS is not set
1515# CONFIG_DEBUG_CREDENTIALS is not set
1438# CONFIG_FRAME_POINTER is not set 1516# CONFIG_FRAME_POINTER is not set
1439# CONFIG_BOOT_PRINTK_DELAY is not set 1517# CONFIG_BOOT_PRINTK_DELAY is not set
1440# CONFIG_RCU_TORTURE_TEST is not set 1518# CONFIG_RCU_TORTURE_TEST is not set
1441# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1519# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1442# CONFIG_BACKTRACE_SELF_TEST is not set 1520# CONFIG_BACKTRACE_SELF_TEST is not set
1443# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1521# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1522# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1444# CONFIG_FAULT_INJECTION is not set 1523# CONFIG_FAULT_INJECTION is not set
1445 1524# CONFIG_PAGE_POISONING is not set
1446# 1525CONFIG_HAVE_FUNCTION_TRACER=y
1447# Tracers 1526CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1448# 1527CONFIG_TRACING_SUPPORT=y
1449# CONFIG_SCHED_TRACER is not set 1528# CONFIG_FTRACE is not set
1450# CONFIG_CONTEXT_SWITCH_TRACER is not set 1529# CONFIG_BRANCH_PROFILE_NONE is not set
1451# CONFIG_BOOT_TRACER is not set 1530# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1452# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1531# CONFIG_PROFILE_ALL_BRANCHES is not set
1532# CONFIG_DYNAMIC_DEBUG is not set
1453# CONFIG_SAMPLES is not set 1533# CONFIG_SAMPLES is not set
1454CONFIG_HAVE_ARCH_KGDB=y 1534CONFIG_HAVE_ARCH_KGDB=y
1455# CONFIG_KGDB is not set 1535# CONFIG_KGDB is not set
@@ -1474,6 +1554,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1474CONFIG_EARLY_PRINTK=y 1554CONFIG_EARLY_PRINTK=y
1475CONFIG_CPLB_INFO=y 1555CONFIG_CPLB_INFO=y
1476CONFIG_ACCESS_CHECK=y 1556CONFIG_ACCESS_CHECK=y
1557# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1477 1558
1478# 1559#
1479# Security options 1560# Security options
@@ -1482,15 +1563,15 @@ CONFIG_ACCESS_CHECK=y
1482CONFIG_SECURITY=y 1563CONFIG_SECURITY=y
1483# CONFIG_SECURITYFS is not set 1564# CONFIG_SECURITYFS is not set
1484# CONFIG_SECURITY_NETWORK is not set 1565# CONFIG_SECURITY_NETWORK is not set
1566# CONFIG_SECURITY_PATH is not set
1485# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1567# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1486# CONFIG_SECURITY_ROOTPLUG is not set 1568# CONFIG_SECURITY_ROOTPLUG is not set
1487CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1569# CONFIG_SECURITY_TOMOYO is not set
1488CONFIG_CRYPTO=y 1570CONFIG_CRYPTO=y
1489 1571
1490# 1572#
1491# Crypto core or helper 1573# Crypto core or helper
1492# 1574#
1493# CONFIG_CRYPTO_FIPS is not set
1494# CONFIG_CRYPTO_MANAGER is not set 1575# CONFIG_CRYPTO_MANAGER is not set
1495# CONFIG_CRYPTO_MANAGER2 is not set 1576# CONFIG_CRYPTO_MANAGER2 is not set
1496# CONFIG_CRYPTO_GF128MUL is not set 1577# CONFIG_CRYPTO_GF128MUL is not set
@@ -1522,11 +1603,13 @@ CONFIG_CRYPTO=y
1522# 1603#
1523# CONFIG_CRYPTO_HMAC is not set 1604# CONFIG_CRYPTO_HMAC is not set
1524# CONFIG_CRYPTO_XCBC is not set 1605# CONFIG_CRYPTO_XCBC is not set
1606# CONFIG_CRYPTO_VMAC is not set
1525 1607
1526# 1608#
1527# Digest 1609# Digest
1528# 1610#
1529# CONFIG_CRYPTO_CRC32C is not set 1611# CONFIG_CRYPTO_CRC32C is not set
1612# CONFIG_CRYPTO_GHASH is not set
1530# CONFIG_CRYPTO_MD4 is not set 1613# CONFIG_CRYPTO_MD4 is not set
1531# CONFIG_CRYPTO_MD5 is not set 1614# CONFIG_CRYPTO_MD5 is not set
1532# CONFIG_CRYPTO_MICHAEL_MIC is not set 1615# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1563,6 +1646,7 @@ CONFIG_CRYPTO=y
1563# Compression 1646# Compression
1564# 1647#
1565# CONFIG_CRYPTO_DEFLATE is not set 1648# CONFIG_CRYPTO_DEFLATE is not set
1649# CONFIG_CRYPTO_ZLIB is not set
1566# CONFIG_CRYPTO_LZO is not set 1650# CONFIG_CRYPTO_LZO is not set
1567 1651
1568# 1652#
@@ -1570,11 +1654,13 @@ CONFIG_CRYPTO=y
1570# 1654#
1571# CONFIG_CRYPTO_ANSI_CPRNG is not set 1655# CONFIG_CRYPTO_ANSI_CPRNG is not set
1572CONFIG_CRYPTO_HW=y 1656CONFIG_CRYPTO_HW=y
1657# CONFIG_BINARY_PRINTF is not set
1573 1658
1574# 1659#
1575# Library routines 1660# Library routines
1576# 1661#
1577CONFIG_BITREVERSE=y 1662CONFIG_BITREVERSE=y
1663CONFIG_GENERIC_FIND_LAST_BIT=y
1578CONFIG_CRC_CCITT=m 1664CONFIG_CRC_CCITT=m
1579# CONFIG_CRC16 is not set 1665# CONFIG_CRC16 is not set
1580# CONFIG_CRC_T10DIF is not set 1666# CONFIG_CRC_T10DIF is not set
@@ -1584,6 +1670,8 @@ CONFIG_CRC32=y
1584# CONFIG_LIBCRC32C is not set 1670# CONFIG_LIBCRC32C is not set
1585CONFIG_ZLIB_INFLATE=y 1671CONFIG_ZLIB_INFLATE=y
1586CONFIG_ZLIB_DEFLATE=m 1672CONFIG_ZLIB_DEFLATE=m
1673CONFIG_DECOMPRESS_GZIP=y
1587CONFIG_HAS_IOMEM=y 1674CONFIG_HAS_IOMEM=y
1588CONFIG_HAS_IOPORT=y 1675CONFIG_HAS_IOPORT=y
1589CONFIG_HAS_DMA=y 1676CONFIG_HAS_DMA=y
1677CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
new file mode 100644
index 000000000000..d2dfcb0e7ce4
--- /dev/null
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -0,0 +1,1811 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.32.2
4#
5# CONFIG_MMU is not set
6# CONFIG_FPU is not set
7CONFIG_RWSEM_GENERIC_SPINLOCK=y
8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
12CONFIG_ZONE_DMA=y
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
17CONFIG_GENERIC_GPIO=y
18CONFIG_FORCE_MAX_ZONEORDER=14
19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
25
26#
27# General setup
28#
29CONFIG_EXPERIMENTAL=y
30CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION=""
33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
40CONFIG_SYSVIPC=y
41CONFIG_SYSVIPC_SYSCTL=y
42# CONFIG_POSIX_MQUEUE is not set
43# CONFIG_BSD_PROCESS_ACCT is not set
44# CONFIG_TASKSTATS is not set
45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
56CONFIG_IKCONFIG=y
57CONFIG_IKCONFIG_PROC=y
58CONFIG_LOG_BUF_SHIFT=14
59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
61# CONFIG_SYSFS_DEPRECATED_V2 is not set
62# CONFIG_RELAY is not set
63# CONFIG_NAMESPACES is not set
64CONFIG_BLK_DEV_INITRD=y
65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
70CONFIG_SYSCTL=y
71CONFIG_ANON_INODES=y
72CONFIG_EMBEDDED=y
73CONFIG_UID16=y
74# CONFIG_SYSCTL_SYSCALL is not set
75CONFIG_KALLSYMS=y
76# CONFIG_KALLSYMS_ALL is not set
77# CONFIG_KALLSYMS_EXTRA_PASS is not set
78CONFIG_HOTPLUG=y
79CONFIG_PRINTK=y
80CONFIG_BUG=y
81# CONFIG_ELF_CORE is not set
82CONFIG_BASE_FULL=y
83# CONFIG_FUTEX is not set
84CONFIG_EPOLL=y
85# CONFIG_SIGNALFD is not set
86# CONFIG_TIMERFD is not set
87# CONFIG_EVENTFD is not set
88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
93CONFIG_VM_EVENT_COUNTERS=y
94CONFIG_COMPAT_BRK=y
95CONFIG_SLAB=y
96# CONFIG_SLUB is not set
97# CONFIG_SLOB is not set
98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
99# CONFIG_PROFILING is not set
100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
108CONFIG_SLABINFO=y
109CONFIG_BASE_SMALL=0
110CONFIG_MODULES=y
111# CONFIG_MODULE_FORCE_LOAD is not set
112CONFIG_MODULE_UNLOAD=y
113# CONFIG_MODULE_FORCE_UNLOAD is not set
114# CONFIG_MODVERSIONS is not set
115# CONFIG_MODULE_SRCVERSION_ALL is not set
116CONFIG_BLOCK=y
117# CONFIG_LBDAF is not set
118# CONFIG_BLK_DEV_BSG is not set
119# CONFIG_BLK_DEV_INTEGRITY is not set
120
121#
122# IO Schedulers
123#
124CONFIG_IOSCHED_NOOP=y
125CONFIG_IOSCHED_AS=y
126# CONFIG_IOSCHED_DEADLINE is not set
127CONFIG_IOSCHED_CFQ=y
128CONFIG_DEFAULT_AS=y
129# CONFIG_DEFAULT_DEADLINE is not set
130# CONFIG_DEFAULT_CFQ is not set
131# CONFIG_DEFAULT_NOOP is not set
132CONFIG_DEFAULT_IOSCHED="anticipatory"
133# CONFIG_PREEMPT_NONE is not set
134CONFIG_PREEMPT_VOLUNTARY=y
135# CONFIG_PREEMPT is not set
136# CONFIG_FREEZER is not set
137
138#
139# Blackfin Processor Options
140#
141
142#
143# Processor and Board Settings
144#
145# CONFIG_BF512 is not set
146# CONFIG_BF514 is not set
147# CONFIG_BF516 is not set
148# CONFIG_BF518 is not set
149# CONFIG_BF522 is not set
150# CONFIG_BF523 is not set
151# CONFIG_BF524 is not set
152# CONFIG_BF525 is not set
153# CONFIG_BF526 is not set
154CONFIG_BF527=y
155# CONFIG_BF531 is not set
156# CONFIG_BF532 is not set
157# CONFIG_BF533 is not set
158# CONFIG_BF534 is not set
159# CONFIG_BF536 is not set
160# CONFIG_BF537 is not set
161# CONFIG_BF538 is not set
162# CONFIG_BF539 is not set
163# CONFIG_BF542_std is not set
164# CONFIG_BF542M is not set
165# CONFIG_BF544_std is not set
166# CONFIG_BF544M is not set
167# CONFIG_BF547_std is not set
168# CONFIG_BF547M is not set
169# CONFIG_BF548_std is not set
170# CONFIG_BF548M is not set
171# CONFIG_BF549_std is not set
172# CONFIG_BF549M is not set
173# CONFIG_BF561 is not set
174CONFIG_BF_REV_MIN=0
175CONFIG_BF_REV_MAX=2
176# CONFIG_BF_REV_0_0 is not set
177# CONFIG_BF_REV_0_1 is not set
178CONFIG_BF_REV_0_2=y
179# CONFIG_BF_REV_0_3 is not set
180# CONFIG_BF_REV_0_4 is not set
181# CONFIG_BF_REV_0_5 is not set
182# CONFIG_BF_REV_0_6 is not set
183# CONFIG_BF_REV_ANY is not set
184# CONFIG_BF_REV_NONE is not set
185CONFIG_MEM_MT48LC32M16A2TG_75=y
186CONFIG_IRQ_PLL_WAKEUP=7
187CONFIG_IRQ_DMA0_ERROR=7
188CONFIG_IRQ_DMAR0_BLK=7
189CONFIG_IRQ_DMAR1_BLK=7
190CONFIG_IRQ_DMAR0_OVR=7
191CONFIG_IRQ_DMAR1_OVR=7
192CONFIG_IRQ_PPI_ERROR=7
193CONFIG_IRQ_MAC_ERROR=7
194CONFIG_IRQ_SPORT0_ERROR=7
195CONFIG_IRQ_SPORT1_ERROR=7
196CONFIG_IRQ_UART0_ERROR=7
197CONFIG_IRQ_UART1_ERROR=7
198CONFIG_IRQ_RTC=8
199CONFIG_IRQ_PPI=8
200CONFIG_IRQ_SPORT0_RX=9
201CONFIG_IRQ_SPORT0_TX=9
202CONFIG_IRQ_SPORT1_RX=9
203CONFIG_IRQ_SPORT1_TX=9
204CONFIG_IRQ_TWI=10
205CONFIG_IRQ_UART0_RX=10
206CONFIG_IRQ_UART0_TX=10
207CONFIG_IRQ_UART1_RX=10
208CONFIG_IRQ_UART1_TX=10
209CONFIG_IRQ_OPTSEC=11
210CONFIG_IRQ_CNT=11
211CONFIG_IRQ_MAC_RX=11
212CONFIG_IRQ_PORTH_INTA=11
213CONFIG_IRQ_MAC_TX=11
214CONFIG_IRQ_PORTH_INTB=11
215CONFIG_IRQ_TIMER0=8
216CONFIG_IRQ_TIMER1=12
217CONFIG_IRQ_TIMER2=12
218CONFIG_IRQ_TIMER3=12
219CONFIG_IRQ_TIMER4=12
220CONFIG_IRQ_TIMER5=12
221CONFIG_IRQ_TIMER6=12
222CONFIG_IRQ_TIMER7=12
223CONFIG_IRQ_PORTG_INTA=12
224CONFIG_IRQ_PORTG_INTB=12
225CONFIG_IRQ_MEM_DMA0=13
226CONFIG_IRQ_MEM_DMA1=13
227CONFIG_IRQ_WATCH=13
228CONFIG_IRQ_PORTF_INTA=13
229CONFIG_IRQ_PORTF_INTB=13
230CONFIG_BF52x=y
231# CONFIG_BFIN527_EZKIT is not set
232CONFIG_BFIN527_EZKIT_V2=y
233# CONFIG_BFIN527_BLUETECHNIX_CM is not set
234# CONFIG_BFIN526_EZBRD is not set
235
236#
237# BF527 Specific Configuration
238#
239
240#
241# Alternative Multiplexing Scheme
242#
243# CONFIG_BF527_SPORT0_PORTF is not set
244CONFIG_BF527_SPORT0_PORTG=y
245CONFIG_BF527_SPORT0_TSCLK_PG10=y
246# CONFIG_BF527_SPORT0_TSCLK_PG14 is not set
247CONFIG_BF527_UART1_PORTF=y
248# CONFIG_BF527_UART1_PORTG is not set
249# CONFIG_BF527_NAND_D_PORTF is not set
250CONFIG_BF527_NAND_D_PORTH=y
251
252#
253# Interrupt Priority Assignment
254#
255
256#
257# Priority
258#
259CONFIG_IRQ_SPI=10
260CONFIG_IRQ_SPI_ERROR=7
261CONFIG_IRQ_NFC_ERROR=7
262CONFIG_IRQ_HDMA_ERROR=7
263CONFIG_IRQ_HDMA=7
264CONFIG_IRQ_USB_EINT=10
265CONFIG_IRQ_USB_INT0=11
266CONFIG_IRQ_USB_INT1=11
267CONFIG_IRQ_USB_INT2=11
268CONFIG_IRQ_USB_DMA=11
269
270#
271# Board customizations
272#
273# CONFIG_CMDLINE_BOOL is not set
274CONFIG_BOOT_LOAD=0x1000
275
276#
277# Clock/PLL Setup
278#
279CONFIG_CLKIN_HZ=25000000
280# CONFIG_BFIN_KERNEL_CLOCK is not set
281CONFIG_MAX_VCO_HZ=600000000
282CONFIG_MIN_VCO_HZ=50000000
283CONFIG_MAX_SCLK_HZ=133333333
284CONFIG_MIN_SCLK_HZ=27000000
285
286#
287# Kernel Timer/Scheduler
288#
289# CONFIG_HZ_100 is not set
290CONFIG_HZ_250=y
291# CONFIG_HZ_300 is not set
292# CONFIG_HZ_1000 is not set
293CONFIG_HZ=250
294# CONFIG_SCHED_HRTICK is not set
295CONFIG_GENERIC_TIME=y
296CONFIG_GENERIC_CLOCKEVENTS=y
297# CONFIG_TICKSOURCE_GPTMR0 is not set
298CONFIG_TICKSOURCE_CORETMR=y
299# CONFIG_CYCLES_CLOCKSOURCE is not set
300# CONFIG_GPTMR0_CLOCKSOURCE is not set
301# CONFIG_NO_HZ is not set
302# CONFIG_HIGH_RES_TIMERS is not set
303CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
304
305#
306# Misc
307#
308CONFIG_BFIN_SCRATCH_REG_RETN=y
309# CONFIG_BFIN_SCRATCH_REG_RETE is not set
310# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
311
312#
313# Blackfin Kernel Optimizations
314#
315
316#
317# Memory Optimizations
318#
319CONFIG_I_ENTRY_L1=y
320CONFIG_EXCPT_IRQ_SYSC_L1=y
321CONFIG_DO_IRQ_L1=y
322CONFIG_CORE_TIMER_IRQ_L1=y
323CONFIG_IDLE_L1=y
324# CONFIG_SCHEDULE_L1 is not set
325CONFIG_ARITHMETIC_OPS_L1=y
326CONFIG_ACCESS_OK_L1=y
327# CONFIG_MEMSET_L1 is not set
328# CONFIG_MEMCPY_L1 is not set
329# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set
330# CONFIG_IP_CHECKSUM_L1 is not set
331CONFIG_CACHELINE_ALIGNED_L1=y
332# CONFIG_SYSCALL_TAB_L1 is not set
333# CONFIG_CPLB_SWITCH_TAB_L1 is not set
334CONFIG_APP_STACK_L1=y
335
336#
337# Speed Optimizations
338#
339CONFIG_BFIN_INS_LOWOVERHEAD=y
340CONFIG_RAMKERNEL=y
341# CONFIG_ROMKERNEL is not set
342CONFIG_SELECT_MEMORY_MODEL=y
343CONFIG_FLATMEM_MANUAL=y
344# CONFIG_DISCONTIGMEM_MANUAL is not set
345# CONFIG_SPARSEMEM_MANUAL is not set
346CONFIG_FLATMEM=y
347CONFIG_FLAT_NODE_MEM_MAP=y
348CONFIG_PAGEFLAGS_EXTENDED=y
349CONFIG_SPLIT_PTLOCK_CPUS=4
350# CONFIG_PHYS_ADDR_T_64BIT is not set
351CONFIG_ZONE_DMA_FLAG=1
352CONFIG_VIRT_TO_BUS=y
353CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
354CONFIG_BFIN_GPTIMERS=y
355# CONFIG_DMA_UNCACHED_4M is not set
356# CONFIG_DMA_UNCACHED_2M is not set
357CONFIG_DMA_UNCACHED_1M=y
358# CONFIG_DMA_UNCACHED_512K is not set
359# CONFIG_DMA_UNCACHED_256K is not set
360# CONFIG_DMA_UNCACHED_128K is not set
361# CONFIG_DMA_UNCACHED_NONE is not set
362
363#
364# Cache Support
365#
366CONFIG_BFIN_ICACHE=y
367CONFIG_BFIN_EXTMEM_ICACHEABLE=y
368CONFIG_BFIN_DCACHE=y
369# CONFIG_BFIN_DCACHE_BANKA is not set
370CONFIG_BFIN_EXTMEM_DCACHEABLE=y
371CONFIG_BFIN_EXTMEM_WRITEBACK=y
372# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
373
374#
375# Memory Protection Unit
376#
377# CONFIG_MPU is not set
378
379#
380# Asynchronous Memory Configuration
381#
382
383#
384# EBIU_AMGCTL Global Control
385#
386CONFIG_C_AMCKEN=y
387CONFIG_C_CDPRIO=y
388# CONFIG_C_AMBEN is not set
389# CONFIG_C_AMBEN_B0 is not set
390# CONFIG_C_AMBEN_B0_B1 is not set
391# CONFIG_C_AMBEN_B0_B1_B2 is not set
392CONFIG_C_AMBEN_ALL=y
393
394#
395# EBIU_AMBCTL Control
396#
397CONFIG_BANK_0=0x7BB0
398CONFIG_BANK_1=0x7BB0
399CONFIG_BANK_2=0x7BB0
400CONFIG_BANK_3=0x99B2
401
402#
403# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
404#
405# CONFIG_ARCH_SUPPORTS_MSI is not set
406# CONFIG_PCCARD is not set
407
408#
409# Executable file formats
410#
411CONFIG_BINFMT_ELF_FDPIC=y
412CONFIG_BINFMT_FLAT=y
413CONFIG_BINFMT_ZFLAT=y
414# CONFIG_BINFMT_SHARED_FLAT is not set
415# CONFIG_HAVE_AOUT is not set
416# CONFIG_BINFMT_MISC is not set
417
418#
419# Power management options
420#
421# CONFIG_PM is not set
422CONFIG_ARCH_SUSPEND_POSSIBLE=y
423
424#
425# CPU Frequency scaling
426#
427# CONFIG_CPU_FREQ is not set
428CONFIG_NET=y
429
430#
431# Networking options
432#
433CONFIG_PACKET=y
434# CONFIG_PACKET_MMAP is not set
435CONFIG_UNIX=y
436# CONFIG_NET_KEY is not set
437CONFIG_INET=y
438# CONFIG_IP_MULTICAST is not set
439# CONFIG_IP_ADVANCED_ROUTER is not set
440CONFIG_IP_FIB_HASH=y
441CONFIG_IP_PNP=y
442# CONFIG_IP_PNP_DHCP is not set
443# CONFIG_IP_PNP_BOOTP is not set
444# CONFIG_IP_PNP_RARP is not set
445# CONFIG_NET_IPIP is not set
446# CONFIG_NET_IPGRE is not set
447# CONFIG_ARPD is not set
448# CONFIG_SYN_COOKIES is not set
449# CONFIG_INET_AH is not set
450# CONFIG_INET_ESP is not set
451# CONFIG_INET_IPCOMP is not set
452# CONFIG_INET_XFRM_TUNNEL is not set
453# CONFIG_INET_TUNNEL is not set
454# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
455# CONFIG_INET_XFRM_MODE_TUNNEL is not set
456# CONFIG_INET_XFRM_MODE_BEET is not set
457# CONFIG_INET_LRO is not set
458# CONFIG_INET_DIAG is not set
459# CONFIG_TCP_CONG_ADVANCED is not set
460CONFIG_TCP_CONG_CUBIC=y
461CONFIG_DEFAULT_TCP_CONG="cubic"
462# CONFIG_TCP_MD5SIG is not set
463# CONFIG_IPV6 is not set
464# CONFIG_NETLABEL is not set
465# CONFIG_NETWORK_SECMARK is not set
466# CONFIG_NETFILTER is not set
467# CONFIG_IP_DCCP is not set
468# CONFIG_IP_SCTP is not set
469# CONFIG_RDS is not set
470# CONFIG_TIPC is not set
471# CONFIG_ATM is not set
472# CONFIG_BRIDGE is not set
473# CONFIG_NET_DSA is not set
474# CONFIG_VLAN_8021Q is not set
475# CONFIG_DECNET is not set
476# CONFIG_LLC2 is not set
477# CONFIG_IPX is not set
478# CONFIG_ATALK is not set
479# CONFIG_X25 is not set
480# CONFIG_LAPB is not set
481# CONFIG_ECONET is not set
482# CONFIG_WAN_ROUTER is not set
483# CONFIG_PHONET is not set
484# CONFIG_IEEE802154 is not set
485# CONFIG_NET_SCHED is not set
486# CONFIG_DCB is not set
487
488#
489# Network testing
490#
491# CONFIG_NET_PKTGEN is not set
492# CONFIG_HAMRADIO is not set
493# CONFIG_CAN is not set
494CONFIG_IRDA=m
495
496#
497# IrDA protocols
498#
499CONFIG_IRLAN=m
500CONFIG_IRCOMM=m
501# CONFIG_IRDA_ULTRA is not set
502
503#
504# IrDA options
505#
506# CONFIG_IRDA_CACHE_LAST_LSAP is not set
507# CONFIG_IRDA_FAST_RR is not set
508# CONFIG_IRDA_DEBUG is not set
509
510#
511# Infrared-port device drivers
512#
513
514#
515# SIR device drivers
516#
517CONFIG_IRTTY_SIR=m
518CONFIG_BFIN_SIR=m
519CONFIG_BFIN_SIR0=y
520CONFIG_SIR_BFIN_DMA=y
521# CONFIG_SIR_BFIN_PIO is not set
522
523#
524# Dongle support
525#
526# CONFIG_DONGLE is not set
527# CONFIG_KINGSUN_DONGLE is not set
528# CONFIG_KSDAZZLE_DONGLE is not set
529# CONFIG_KS959_DONGLE is not set
530
531#
532# FIR device drivers
533#
534# CONFIG_USB_IRDA is not set
535# CONFIG_SIGMATEL_FIR is not set
536# CONFIG_MCS_FIR is not set
537# CONFIG_BT is not set
538# CONFIG_AF_RXRPC is not set
539# CONFIG_WIRELESS is not set
540# CONFIG_WIMAX is not set
541# CONFIG_RFKILL is not set
542# CONFIG_NET_9P is not set
543
544#
545# Device Drivers
546#
547
548#
549# Generic Driver Options
550#
551CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
552CONFIG_STANDALONE=y
553CONFIG_PREVENT_FIRMWARE_BUILD=y
554# CONFIG_FW_LOADER is not set
555# CONFIG_DEBUG_DRIVER is not set
556# CONFIG_DEBUG_DEVRES is not set
557# CONFIG_SYS_HYPERVISOR is not set
558# CONFIG_CONNECTOR is not set
559CONFIG_MTD=y
560# CONFIG_MTD_DEBUG is not set
561# CONFIG_MTD_TESTS is not set
562# CONFIG_MTD_CONCAT is not set
563CONFIG_MTD_PARTITIONS=y
564# CONFIG_MTD_REDBOOT_PARTS is not set
565# CONFIG_MTD_CMDLINE_PARTS is not set
566# CONFIG_MTD_AR7_PARTS is not set
567
568#
569# User Modules And Translation Layers
570#
571CONFIG_MTD_CHAR=m
572CONFIG_MTD_BLKDEVS=y
573CONFIG_MTD_BLOCK=y
574# CONFIG_FTL is not set
575# CONFIG_NFTL is not set
576# CONFIG_INFTL is not set
577# CONFIG_RFD_FTL is not set
578# CONFIG_SSFDC is not set
579# CONFIG_MTD_OOPS is not set
580
581#
582# RAM/ROM/Flash chip drivers
583#
584# CONFIG_MTD_CFI is not set
585CONFIG_MTD_JEDECPROBE=m
586CONFIG_MTD_GEN_PROBE=m
587# CONFIG_MTD_CFI_ADV_OPTIONS is not set
588CONFIG_MTD_MAP_BANK_WIDTH_1=y
589CONFIG_MTD_MAP_BANK_WIDTH_2=y
590CONFIG_MTD_MAP_BANK_WIDTH_4=y
591# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
592# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
593# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
594CONFIG_MTD_CFI_I1=y
595CONFIG_MTD_CFI_I2=y
596# CONFIG_MTD_CFI_I4 is not set
597# CONFIG_MTD_CFI_I8 is not set
598# CONFIG_MTD_CFI_INTELEXT is not set
599# CONFIG_MTD_CFI_AMDSTD is not set
600# CONFIG_MTD_CFI_STAA is not set
601CONFIG_MTD_RAM=y
602CONFIG_MTD_ROM=m
603# CONFIG_MTD_ABSENT is not set
604
605#
606# Mapping drivers for chip access
607#
608CONFIG_MTD_COMPLEX_MAPPINGS=y
609# CONFIG_MTD_PHYSMAP is not set
610# CONFIG_MTD_GPIO_ADDR is not set
611# CONFIG_MTD_UCLINUX is not set
612# CONFIG_MTD_PLATRAM is not set
613
614#
615# Self-contained MTD device drivers
616#
617# CONFIG_MTD_DATAFLASH is not set
618CONFIG_MTD_M25P80=y
619CONFIG_M25PXX_USE_FAST_READ=y
620# CONFIG_MTD_SST25L is not set
621# CONFIG_MTD_SLRAM is not set
622# CONFIG_MTD_PHRAM is not set
623# CONFIG_MTD_MTDRAM is not set
624# CONFIG_MTD_BLOCK2MTD is not set
625
626#
627# Disk-On-Chip Device Drivers
628#
629# CONFIG_MTD_DOC2000 is not set
630# CONFIG_MTD_DOC2001 is not set
631# CONFIG_MTD_DOC2001PLUS is not set
632CONFIG_MTD_NAND=m
633# CONFIG_MTD_NAND_VERIFY_WRITE is not set
634# CONFIG_MTD_NAND_ECC_SMC is not set
635# CONFIG_MTD_NAND_MUSEUM_IDS is not set
636CONFIG_MTD_NAND_IDS=m
637# CONFIG_MTD_NAND_BF5XX is not set
638# CONFIG_MTD_NAND_DISKONCHIP is not set
639# CONFIG_MTD_NAND_NANDSIM is not set
640# CONFIG_MTD_NAND_PLATFORM is not set
641# CONFIG_MTD_ALAUDA is not set
642# CONFIG_MTD_ONENAND is not set
643
644#
645# LPDDR flash memory drivers
646#
647# CONFIG_MTD_LPDDR is not set
648
649#
650# UBI - Unsorted block images
651#
652# CONFIG_MTD_UBI is not set
653# CONFIG_PARPORT is not set
654CONFIG_BLK_DEV=y
655# CONFIG_BLK_DEV_COW_COMMON is not set
656# CONFIG_BLK_DEV_LOOP is not set
657# CONFIG_BLK_DEV_NBD is not set
658# CONFIG_BLK_DEV_UB is not set
659CONFIG_BLK_DEV_RAM=y
660CONFIG_BLK_DEV_RAM_COUNT=16
661CONFIG_BLK_DEV_RAM_SIZE=4096
662# CONFIG_BLK_DEV_XIP is not set
663# CONFIG_CDROM_PKTCDVD is not set
664# CONFIG_ATA_OVER_ETH is not set
665# CONFIG_BLK_DEV_HD is not set
666CONFIG_MISC_DEVICES=y
667# CONFIG_AD525X_DPOT is not set
668# CONFIG_ICS932S401 is not set
669# CONFIG_ENCLOSURE_SERVICES is not set
670# CONFIG_ISL29003 is not set
671# CONFIG_C2PORT is not set
672
673#
674# EEPROM support
675#
676# CONFIG_EEPROM_AT24 is not set
677# CONFIG_EEPROM_AT25 is not set
678# CONFIG_EEPROM_LEGACY is not set
679# CONFIG_EEPROM_MAX6875 is not set
680# CONFIG_EEPROM_93CX6 is not set
681CONFIG_HAVE_IDE=y
682# CONFIG_IDE is not set
683
684#
685# SCSI device support
686#
687# CONFIG_RAID_ATTRS is not set
688CONFIG_SCSI=y
689CONFIG_SCSI_DMA=y
690# CONFIG_SCSI_TGT is not set
691# CONFIG_SCSI_NETLINK is not set
692# CONFIG_SCSI_PROC_FS is not set
693
694#
695# SCSI support type (disk, tape, CD-ROM)
696#
697CONFIG_BLK_DEV_SD=y
698# CONFIG_CHR_DEV_ST is not set
699# CONFIG_CHR_DEV_OSST is not set
700CONFIG_BLK_DEV_SR=m
701# CONFIG_BLK_DEV_SR_VENDOR is not set
702# CONFIG_CHR_DEV_SG is not set
703# CONFIG_CHR_DEV_SCH is not set
704# CONFIG_SCSI_MULTI_LUN is not set
705# CONFIG_SCSI_CONSTANTS is not set
706# CONFIG_SCSI_LOGGING is not set
707# CONFIG_SCSI_SCAN_ASYNC is not set
708CONFIG_SCSI_WAIT_SCAN=m
709
710#
711# SCSI Transports
712#
713# CONFIG_SCSI_SPI_ATTRS is not set
714# CONFIG_SCSI_FC_ATTRS is not set
715# CONFIG_SCSI_ISCSI_ATTRS is not set
716# CONFIG_SCSI_SAS_LIBSAS is not set
717# CONFIG_SCSI_SRP_ATTRS is not set
718# CONFIG_SCSI_LOWLEVEL is not set
719# CONFIG_SCSI_DH is not set
720# CONFIG_SCSI_OSD_INITIATOR is not set
721# CONFIG_ATA is not set
722# CONFIG_MD is not set
723CONFIG_NETDEVICES=y
724# CONFIG_DUMMY is not set
725# CONFIG_BONDING is not set
726# CONFIG_MACVLAN is not set
727# CONFIG_EQUALIZER is not set
728# CONFIG_TUN is not set
729# CONFIG_VETH is not set
730CONFIG_PHYLIB=y
731
732#
733# MII PHY device drivers
734#
735# CONFIG_MARVELL_PHY is not set
736# CONFIG_DAVICOM_PHY is not set
737# CONFIG_QSEMI_PHY is not set
738# CONFIG_LXT_PHY is not set
739# CONFIG_CICADA_PHY is not set
740# CONFIG_VITESSE_PHY is not set
741# CONFIG_SMSC_PHY is not set
742# CONFIG_BROADCOM_PHY is not set
743# CONFIG_ICPLUS_PHY is not set
744# CONFIG_REALTEK_PHY is not set
745# CONFIG_NATIONAL_PHY is not set
746# CONFIG_STE10XP is not set
747# CONFIG_LSI_ET1011C_PHY is not set
748# CONFIG_FIXED_PHY is not set
749# CONFIG_MDIO_BITBANG is not set
750CONFIG_NET_ETHERNET=y
751CONFIG_MII=y
752CONFIG_BFIN_MAC=y
753CONFIG_BFIN_MAC_USE_L1=y
754CONFIG_BFIN_TX_DESC_NUM=10
755CONFIG_BFIN_RX_DESC_NUM=20
756CONFIG_BFIN_MAC_RMII=y
757# CONFIG_SMC91X is not set
758# CONFIG_DM9000 is not set
759# CONFIG_ENC28J60 is not set
760# CONFIG_ETHOC is not set
761# CONFIG_SMSC911X is not set
762# CONFIG_DNET is not set
763# CONFIG_ADF702X is not set
764# CONFIG_IBM_NEW_EMAC_ZMII is not set
765# CONFIG_IBM_NEW_EMAC_RGMII is not set
766# CONFIG_IBM_NEW_EMAC_TAH is not set
767# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
768# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
769# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
770# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
771# CONFIG_B44 is not set
772# CONFIG_KS8842 is not set
773# CONFIG_KS8851 is not set
774# CONFIG_KS8851_MLL is not set
775# CONFIG_NETDEV_1000 is not set
776# CONFIG_NETDEV_10000 is not set
777# CONFIG_WLAN is not set
778
779#
780# Enable WiMAX (Networking options) to see the WiMAX drivers
781#
782
783#
784# USB Network Adapters
785#
786# CONFIG_USB_CATC is not set
787# CONFIG_USB_KAWETH is not set
788# CONFIG_USB_PEGASUS is not set
789# CONFIG_USB_RTL8150 is not set
790# CONFIG_USB_USBNET is not set
791# CONFIG_WAN is not set
792# CONFIG_PPP is not set
793# CONFIG_SLIP is not set
794# CONFIG_NETCONSOLE is not set
795# CONFIG_NETPOLL is not set
796# CONFIG_NET_POLL_CONTROLLER is not set
797# CONFIG_ISDN is not set
798# CONFIG_PHONE is not set
799
800#
801# Input device support
802#
803CONFIG_INPUT=y
804CONFIG_INPUT_FF_MEMLESS=m
805# CONFIG_INPUT_POLLDEV is not set
806
807#
808# Userland interfaces
809#
810# CONFIG_INPUT_MOUSEDEV is not set
811# CONFIG_INPUT_JOYDEV is not set
812CONFIG_INPUT_EVDEV=y
813# CONFIG_INPUT_EVBUG is not set
814
815#
816# Input Device Drivers
817#
818CONFIG_INPUT_KEYBOARD=y
819CONFIG_KEYBOARD_ADP5520=y
820# CONFIG_KEYBOARD_ADP5588 is not set
821# CONFIG_KEYBOARD_ATKBD is not set
822# CONFIG_QT2160 is not set
823# CONFIG_KEYBOARD_LKKBD is not set
824# CONFIG_KEYBOARD_GPIO is not set
825# CONFIG_KEYBOARD_MATRIX is not set
826# CONFIG_KEYBOARD_LM8323 is not set
827# CONFIG_KEYBOARD_MAX7359 is not set
828# CONFIG_KEYBOARD_NEWTON is not set
829# CONFIG_KEYBOARD_OPENCORES is not set
830# CONFIG_KEYBOARD_STOWAWAY is not set
831# CONFIG_KEYBOARD_SUNKBD is not set
832# CONFIG_KEYBOARD_XTKBD is not set
833# CONFIG_INPUT_MOUSE is not set
834# CONFIG_INPUT_JOYSTICK is not set
835# CONFIG_INPUT_TABLET is not set
836CONFIG_INPUT_TOUCHSCREEN=y
837# CONFIG_TOUCHSCREEN_ADS7846 is not set
838# CONFIG_TOUCHSCREEN_AD7877 is not set
839CONFIG_TOUCHSCREEN_AD7879_I2C=y
840CONFIG_TOUCHSCREEN_AD7879=y
841# CONFIG_TOUCHSCREEN_EETI is not set
842# CONFIG_TOUCHSCREEN_FUJITSU is not set
843# CONFIG_TOUCHSCREEN_GUNZE is not set
844# CONFIG_TOUCHSCREEN_ELO is not set
845# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
846# CONFIG_TOUCHSCREEN_MCS5000 is not set
847# CONFIG_TOUCHSCREEN_MTOUCH is not set
848# CONFIG_TOUCHSCREEN_INEXIO is not set
849# CONFIG_TOUCHSCREEN_MK712 is not set
850# CONFIG_TOUCHSCREEN_PENMOUNT is not set
851# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
852# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
853# CONFIG_TOUCHSCREEN_WM97XX is not set
854# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
855# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
856# CONFIG_TOUCHSCREEN_TSC2007 is not set
857CONFIG_INPUT_MISC=y
858# CONFIG_INPUT_ATI_REMOTE is not set
859# CONFIG_INPUT_ATI_REMOTE2 is not set
860# CONFIG_INPUT_KEYSPAN_REMOTE is not set
861# CONFIG_INPUT_POWERMATE is not set
862# CONFIG_INPUT_YEALINK is not set
863# CONFIG_INPUT_CM109 is not set
864# CONFIG_INPUT_UINPUT is not set
865# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
866# CONFIG_INPUT_BFIN_ROTARY is not set
867# CONFIG_INPUT_AD714X is not set
868# CONFIG_INPUT_ADXL34X is not set
869# CONFIG_INPUT_PCF8574 is not set
870
871#
872# Hardware I/O ports
873#
874# CONFIG_SERIO is not set
875# CONFIG_GAMEPORT is not set
876
877#
878# Character devices
879#
880CONFIG_BFIN_DMA_INTERFACE=m
881# CONFIG_BFIN_PPI is not set
882# CONFIG_BFIN_PPIFCD is not set
883# CONFIG_BFIN_SIMPLE_TIMER is not set
884# CONFIG_BFIN_SPI_ADC is not set
885CONFIG_BFIN_SPORT=m
886# CONFIG_BFIN_TWI_LCD is not set
887CONFIG_VT=y
888CONFIG_CONSOLE_TRANSLATIONS=y
889CONFIG_VT_CONSOLE=y
890CONFIG_HW_CONSOLE=y
891# CONFIG_VT_HW_CONSOLE_BINDING is not set
892# CONFIG_DEVKMEM is not set
893CONFIG_BFIN_JTAG_COMM=m
894# CONFIG_SERIAL_NONSTANDARD is not set
895
896#
897# Serial drivers
898#
899# CONFIG_SERIAL_8250 is not set
900
901#
902# Non-8250 serial port support
903#
904# CONFIG_SERIAL_MAX3100 is not set
905CONFIG_SERIAL_BFIN=y
906CONFIG_SERIAL_BFIN_CONSOLE=y
907CONFIG_SERIAL_BFIN_DMA=y
908# CONFIG_SERIAL_BFIN_PIO is not set
909# CONFIG_SERIAL_BFIN_UART0 is not set
910CONFIG_SERIAL_BFIN_UART1=y
911# CONFIG_BFIN_UART1_CTSRTS is not set
912CONFIG_SERIAL_CORE=y
913CONFIG_SERIAL_CORE_CONSOLE=y
914# CONFIG_SERIAL_BFIN_SPORT is not set
915CONFIG_UNIX98_PTYS=y
916# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
917# CONFIG_LEGACY_PTYS is not set
918CONFIG_BFIN_OTP=y
919# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
920# CONFIG_IPMI_HANDLER is not set
921# CONFIG_HW_RANDOM is not set
922# CONFIG_R3964 is not set
923# CONFIG_RAW_DRIVER is not set
924# CONFIG_TCG_TPM is not set
925CONFIG_I2C=y
926CONFIG_I2C_BOARDINFO=y
927CONFIG_I2C_COMPAT=y
928CONFIG_I2C_CHARDEV=m
929CONFIG_I2C_HELPER_AUTO=y
930
931#
932# I2C Hardware Bus support
933#
934
935#
936# I2C system bus drivers (mostly embedded / system-on-chip)
937#
938CONFIG_I2C_BLACKFIN_TWI=y
939CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
940# CONFIG_I2C_GPIO is not set
941# CONFIG_I2C_OCORES is not set
942# CONFIG_I2C_SIMTEC is not set
943
944#
945# External I2C/SMBus adapter drivers
946#
947# CONFIG_I2C_PARPORT_LIGHT is not set
948# CONFIG_I2C_TAOS_EVM is not set
949# CONFIG_I2C_TINY_USB is not set
950
951#
952# Other I2C/SMBus bus drivers
953#
954# CONFIG_I2C_PCA_PLATFORM is not set
955# CONFIG_I2C_STUB is not set
956
957#
958# Miscellaneous I2C Chip support
959#
960# CONFIG_DS1682 is not set
961# CONFIG_SENSORS_TSL2550 is not set
962# CONFIG_I2C_DEBUG_CORE is not set
963# CONFIG_I2C_DEBUG_ALGO is not set
964# CONFIG_I2C_DEBUG_BUS is not set
965# CONFIG_I2C_DEBUG_CHIP is not set
966CONFIG_SPI=y
967# CONFIG_SPI_DEBUG is not set
968CONFIG_SPI_MASTER=y
969
970#
971# SPI Master Controller Drivers
972#
973CONFIG_SPI_BFIN=y
974# CONFIG_SPI_BFIN_LOCK is not set
975# CONFIG_SPI_BFIN_SPORT is not set
976# CONFIG_SPI_BITBANG is not set
977# CONFIG_SPI_GPIO is not set
978
979#
980# SPI Protocol Masters
981#
982# CONFIG_SPI_SPIDEV is not set
983# CONFIG_SPI_TLE62X0 is not set
984
985#
986# PPS support
987#
988# CONFIG_PPS is not set
989CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
990CONFIG_GPIOLIB=y
991# CONFIG_DEBUG_GPIO is not set
992CONFIG_GPIO_SYSFS=y
993
994#
995# Memory mapped GPIO expanders:
996#
997
998#
999# I2C GPIO expanders:
1000#
1001# CONFIG_GPIO_MAX732X is not set
1002# CONFIG_GPIO_PCA953X is not set
1003# CONFIG_GPIO_PCF857X is not set
1004# CONFIG_GPIO_ADP5520 is not set
1005# CONFIG_GPIO_ADP5588 is not set
1006
1007#
1008# PCI GPIO expanders:
1009#
1010
1011#
1012# SPI GPIO expanders:
1013#
1014# CONFIG_GPIO_MAX7301 is not set
1015# CONFIG_GPIO_MCP23S08 is not set
1016# CONFIG_GPIO_MC33880 is not set
1017
1018#
1019# AC97 GPIO expanders:
1020#
1021# CONFIG_W1 is not set
1022# CONFIG_POWER_SUPPLY is not set
1023# CONFIG_HWMON is not set
1024# CONFIG_THERMAL is not set
1025CONFIG_WATCHDOG=y
1026# CONFIG_WATCHDOG_NOWAYOUT is not set
1027
1028#
1029# Watchdog Device Drivers
1030#
1031# CONFIG_SOFT_WATCHDOG is not set
1032CONFIG_BFIN_WDT=y
1033
1034#
1035# USB-based Watchdog Cards
1036#
1037# CONFIG_USBPCWATCHDOG is not set
1038CONFIG_SSB_POSSIBLE=y
1039
1040#
1041# Sonics Silicon Backplane
1042#
1043# CONFIG_SSB is not set
1044
1045#
1046# Multifunction device drivers
1047#
1048# CONFIG_MFD_CORE is not set
1049# CONFIG_MFD_SM501 is not set
1050# CONFIG_HTC_PASIC3 is not set
1051# CONFIG_UCB1400_CORE is not set
1052# CONFIG_TPS65010 is not set
1053# CONFIG_TWL4030_CORE is not set
1054# CONFIG_MFD_TMIO is not set
1055# CONFIG_PMIC_DA903X is not set
1056CONFIG_PMIC_ADP5520=y
1057# CONFIG_MFD_WM8400 is not set
1058# CONFIG_MFD_WM831X is not set
1059# CONFIG_MFD_WM8350_I2C is not set
1060# CONFIG_MFD_PCF50633 is not set
1061# CONFIG_MFD_MC13783 is not set
1062# CONFIG_AB3100_CORE is not set
1063# CONFIG_EZX_PCAP is not set
1064# CONFIG_REGULATOR is not set
1065# CONFIG_MEDIA_SUPPORT is not set
1066
1067#
1068# Graphics support
1069#
1070# CONFIG_VGASTATE is not set
1071# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1072CONFIG_FB=y
1073# CONFIG_FIRMWARE_EDID is not set
1074# CONFIG_FB_DDC is not set
1075# CONFIG_FB_BOOT_VESA_SUPPORT is not set
1076CONFIG_FB_CFB_FILLRECT=y
1077CONFIG_FB_CFB_COPYAREA=y
1078CONFIG_FB_CFB_IMAGEBLIT=y
1079# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
1080# CONFIG_FB_SYS_FILLRECT is not set
1081# CONFIG_FB_SYS_COPYAREA is not set
1082# CONFIG_FB_SYS_IMAGEBLIT is not set
1083# CONFIG_FB_FOREIGN_ENDIAN is not set
1084# CONFIG_FB_SYS_FOPS is not set
1085# CONFIG_FB_SVGALIB is not set
1086# CONFIG_FB_MACMODES is not set
1087# CONFIG_FB_BACKLIGHT is not set
1088# CONFIG_FB_MODE_HELPERS is not set
1089# CONFIG_FB_TILEBLITTING is not set
1090
1091#
1092# Frame buffer hardware drivers
1093#
1094# CONFIG_FB_BFIN_T350MCQB is not set
1095CONFIG_FB_BFIN_LQ035Q1=y
1096# CONFIG_FB_BFIN_7393 is not set
1097# CONFIG_FB_S1D13XXX is not set
1098# CONFIG_FB_VIRTUAL is not set
1099# CONFIG_FB_METRONOME is not set
1100# CONFIG_FB_MB862XX is not set
1101# CONFIG_FB_BROADSHEET is not set
1102CONFIG_BACKLIGHT_LCD_SUPPORT=y
1103CONFIG_LCD_CLASS_DEVICE=m
1104# CONFIG_LCD_LMS283GF05 is not set
1105# CONFIG_LCD_LTV350QV is not set
1106# CONFIG_LCD_ILI9320 is not set
1107# CONFIG_LCD_TDO24M is not set
1108# CONFIG_LCD_VGG2432A4 is not set
1109# CONFIG_LCD_PLATFORM is not set
1110CONFIG_BACKLIGHT_CLASS_DEVICE=m
1111CONFIG_BACKLIGHT_GENERIC=m
1112# CONFIG_BACKLIGHT_ADP5520 is not set
1113# CONFIG_BACKLIGHT_ADP8870 is not set
1114
1115#
1116# Display device support
1117#
1118# CONFIG_DISPLAY_SUPPORT is not set
1119
1120#
1121# Console display driver support
1122#
1123CONFIG_DUMMY_CONSOLE=y
1124CONFIG_FRAMEBUFFER_CONSOLE=y
1125# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
1126# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
1127# CONFIG_FONTS is not set
1128CONFIG_FONT_8x8=y
1129CONFIG_FONT_8x16=y
1130CONFIG_LOGO=y
1131# CONFIG_LOGO_LINUX_MONO is not set
1132# CONFIG_LOGO_LINUX_VGA16 is not set
1133# CONFIG_LOGO_LINUX_CLUT224 is not set
1134# CONFIG_LOGO_BLACKFIN_VGA16 is not set
1135CONFIG_LOGO_BLACKFIN_CLUT224=y
1136CONFIG_SOUND=m
1137# CONFIG_SOUND_OSS_CORE is not set
1138CONFIG_SND=m
1139CONFIG_SND_TIMER=m
1140CONFIG_SND_PCM=m
1141CONFIG_SND_JACK=y
1142# CONFIG_SND_SEQUENCER is not set
1143# CONFIG_SND_MIXER_OSS is not set
1144# CONFIG_SND_PCM_OSS is not set
1145# CONFIG_SND_DYNAMIC_MINORS is not set
1146CONFIG_SND_SUPPORT_OLD_API=y
1147CONFIG_SND_VERBOSE_PROCFS=y
1148# CONFIG_SND_VERBOSE_PRINTK is not set
1149# CONFIG_SND_DEBUG is not set
1150# CONFIG_SND_RAWMIDI_SEQ is not set
1151# CONFIG_SND_OPL3_LIB_SEQ is not set
1152# CONFIG_SND_OPL4_LIB_SEQ is not set
1153# CONFIG_SND_SBAWE_SEQ is not set
1154# CONFIG_SND_EMU10K1_SEQ is not set
1155CONFIG_SND_DRIVERS=y
1156# CONFIG_SND_DUMMY is not set
1157# CONFIG_SND_MTPAV is not set
1158# CONFIG_SND_SERIAL_U16550 is not set
1159# CONFIG_SND_MPU401 is not set
1160CONFIG_SND_SPI=y
1161
1162#
1163# ALSA Blackfin devices
1164#
1165# CONFIG_SND_BFIN_AD73322 is not set
1166CONFIG_SND_USB=y
1167# CONFIG_SND_USB_AUDIO is not set
1168# CONFIG_SND_USB_CAIAQ is not set
1169CONFIG_SND_SOC=m
1170CONFIG_SND_SOC_AC97_BUS=y
1171CONFIG_SND_BF5XX_I2S=m
1172CONFIG_SND_BF5XX_SOC_SSM2602=m
1173# CONFIG_SND_BF5XX_SOC_AD73311 is not set
1174# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set
1175# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set
1176# CONFIG_SND_BF5XX_TDM is not set
1177CONFIG_SND_BF5XX_AC97=m
1178CONFIG_SND_BF5XX_MMAP_SUPPORT=y
1179# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
1180# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
1181CONFIG_SND_BF5XX_SOC_AD1980=m
1182CONFIG_SND_BF5XX_SOC_SPORT=m
1183CONFIG_SND_BF5XX_SOC_I2S=m
1184CONFIG_SND_BF5XX_SOC_AC97=m
1185CONFIG_SND_BF5XX_SPORT_NUM=0
1186CONFIG_SND_SOC_I2C_AND_SPI=m
1187# CONFIG_SND_SOC_ALL_CODECS is not set
1188CONFIG_SND_SOC_AD1980=m
1189CONFIG_SND_SOC_SSM2602=m
1190# CONFIG_SOUND_PRIME is not set
1191CONFIG_AC97_BUS=m
1192CONFIG_HID_SUPPORT=y
1193CONFIG_HID=y
1194# CONFIG_HIDRAW is not set
1195
1196#
1197# USB Input Devices
1198#
1199CONFIG_USB_HID=y
1200# CONFIG_HID_PID is not set
1201# CONFIG_USB_HIDDEV is not set
1202
1203#
1204# Special HID drivers
1205#
1206CONFIG_HID_A4TECH=y
1207CONFIG_HID_APPLE=y
1208CONFIG_HID_BELKIN=y
1209CONFIG_HID_CHERRY=y
1210CONFIG_HID_CHICONY=y
1211CONFIG_HID_CYPRESS=y
1212# CONFIG_HID_DRAGONRISE is not set
1213CONFIG_HID_EZKEY=y
1214# CONFIG_HID_KYE is not set
1215CONFIG_HID_GYRATION=y
1216# CONFIG_HID_TWINHAN is not set
1217# CONFIG_HID_KENSINGTON is not set
1218CONFIG_HID_LOGITECH=y
1219# CONFIG_LOGITECH_FF is not set
1220# CONFIG_LOGIRUMBLEPAD2_FF is not set
1221CONFIG_HID_MICROSOFT=y
1222CONFIG_HID_MONTEREY=y
1223# CONFIG_HID_NTRIG is not set
1224CONFIG_HID_PANTHERLORD=y
1225# CONFIG_PANTHERLORD_FF is not set
1226CONFIG_HID_PETALYNX=y
1227CONFIG_HID_SAMSUNG=y
1228CONFIG_HID_SONY=y
1229CONFIG_HID_SUNPLUS=y
1230# CONFIG_HID_GREENASIA is not set
1231# CONFIG_HID_SMARTJOYPLUS is not set
1232# CONFIG_HID_TOPSEED is not set
1233# CONFIG_HID_THRUSTMASTER is not set
1234# CONFIG_HID_ZEROPLUS is not set
1235CONFIG_USB_SUPPORT=y
1236CONFIG_USB_ARCH_HAS_HCD=y
1237# CONFIG_USB_ARCH_HAS_OHCI is not set
1238# CONFIG_USB_ARCH_HAS_EHCI is not set
1239CONFIG_USB=y
1240# CONFIG_USB_DEBUG is not set
1241# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
1242
1243#
1244# Miscellaneous USB options
1245#
1246CONFIG_USB_DEVICEFS=y
1247# CONFIG_USB_DEVICE_CLASS is not set
1248# CONFIG_USB_DYNAMIC_MINORS is not set
1249# CONFIG_USB_OTG is not set
1250# CONFIG_USB_OTG_WHITELIST is not set
1251CONFIG_USB_OTG_BLACKLIST_HUB=y
1252CONFIG_USB_MON=y
1253# CONFIG_USB_WUSB is not set
1254# CONFIG_USB_WUSB_CBAF is not set
1255
1256#
1257# USB Host Controller Drivers
1258#
1259# CONFIG_USB_C67X00_HCD is not set
1260# CONFIG_USB_OXU210HP_HCD is not set
1261# CONFIG_USB_ISP116X_HCD is not set
1262# CONFIG_USB_ISP1760_HCD is not set
1263# CONFIG_USB_ISP1362_HCD is not set
1264# CONFIG_USB_SL811_HCD is not set
1265# CONFIG_USB_R8A66597_HCD is not set
1266# CONFIG_USB_HWA_HCD is not set
1267CONFIG_USB_MUSB_HDRC=y
1268CONFIG_USB_MUSB_SOC=y
1269
1270#
1271# Blackfin high speed USB Support
1272#
1273CONFIG_USB_MUSB_HOST=y
1274# CONFIG_USB_MUSB_PERIPHERAL is not set
1275# CONFIG_USB_MUSB_OTG is not set
1276CONFIG_USB_MUSB_HDRC_HCD=y
1277# CONFIG_MUSB_PIO_ONLY is not set
1278CONFIG_USB_INVENTRA_DMA=y
1279# CONFIG_USB_TI_CPPI_DMA is not set
1280# CONFIG_USB_MUSB_DEBUG is not set
1281
1282#
1283# USB Device Class drivers
1284#
1285# CONFIG_USB_ACM is not set
1286# CONFIG_USB_PRINTER is not set
1287# CONFIG_USB_WDM is not set
1288# CONFIG_USB_TMC is not set
1289
1290#
1291# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
1292#
1293
1294#
1295# also be needed; see USB_STORAGE Help for more info
1296#
1297CONFIG_USB_STORAGE=y
1298# CONFIG_USB_STORAGE_DEBUG is not set
1299# CONFIG_USB_STORAGE_DATAFAB is not set
1300# CONFIG_USB_STORAGE_FREECOM is not set
1301# CONFIG_USB_STORAGE_ISD200 is not set
1302# CONFIG_USB_STORAGE_USBAT is not set
1303# CONFIG_USB_STORAGE_SDDR09 is not set
1304# CONFIG_USB_STORAGE_SDDR55 is not set
1305# CONFIG_USB_STORAGE_JUMPSHOT is not set
1306# CONFIG_USB_STORAGE_ALAUDA is not set
1307# CONFIG_USB_STORAGE_ONETOUCH is not set
1308# CONFIG_USB_STORAGE_KARMA is not set
1309# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
1310# CONFIG_USB_LIBUSUAL is not set
1311
1312#
1313# USB Imaging devices
1314#
1315# CONFIG_USB_MDC800 is not set
1316# CONFIG_USB_MICROTEK is not set
1317
1318#
1319# USB port drivers
1320#
1321# CONFIG_USB_SERIAL is not set
1322
1323#
1324# USB Miscellaneous drivers
1325#
1326# CONFIG_USB_EMI62 is not set
1327# CONFIG_USB_EMI26 is not set
1328# CONFIG_USB_ADUTUX is not set
1329# CONFIG_USB_SEVSEG is not set
1330# CONFIG_USB_RIO500 is not set
1331# CONFIG_USB_LEGOTOWER is not set
1332# CONFIG_USB_LCD is not set
1333# CONFIG_USB_BERRY_CHARGE is not set
1334# CONFIG_USB_LED is not set
1335# CONFIG_USB_CYPRESS_CY7C63 is not set
1336# CONFIG_USB_CYTHERM is not set
1337# CONFIG_USB_IDMOUSE is not set
1338# CONFIG_USB_FTDI_ELAN is not set
1339# CONFIG_USB_APPLEDISPLAY is not set
1340# CONFIG_USB_SISUSBVGA is not set
1341# CONFIG_USB_LD is not set
1342# CONFIG_USB_TRANCEVIBRATOR is not set
1343# CONFIG_USB_IOWARRIOR is not set
1344# CONFIG_USB_TEST is not set
1345# CONFIG_USB_ISIGHTFW is not set
1346# CONFIG_USB_VST is not set
1347# CONFIG_USB_GADGET is not set
1348
1349#
1350# OTG and related infrastructure
1351#
1352CONFIG_USB_OTG_UTILS=y
1353# CONFIG_USB_GPIO_VBUS is not set
1354CONFIG_NOP_USB_XCEIV=y
1355# CONFIG_MMC is not set
1356# CONFIG_MEMSTICK is not set
1357CONFIG_NEW_LEDS=y
1358CONFIG_LEDS_CLASS=y
1359
1360#
1361# LED drivers
1362#
1363# CONFIG_LEDS_PCA9532 is not set
1364# CONFIG_LEDS_GPIO is not set
1365# CONFIG_LEDS_LP3944 is not set
1366# CONFIG_LEDS_PCA955X is not set
1367# CONFIG_LEDS_DAC124S085 is not set
1368# CONFIG_LEDS_BD2802 is not set
1369CONFIG_LEDS_ADP5520=y
1370
1371#
1372# LED Triggers
1373#
1374# CONFIG_LEDS_TRIGGERS is not set
1375# CONFIG_ACCESSIBILITY is not set
1376CONFIG_RTC_LIB=y
1377CONFIG_RTC_CLASS=y
1378CONFIG_RTC_HCTOSYS=y
1379CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1380# CONFIG_RTC_DEBUG is not set
1381
1382#
1383# RTC interfaces
1384#
1385CONFIG_RTC_INTF_SYSFS=y
1386CONFIG_RTC_INTF_PROC=y
1387CONFIG_RTC_INTF_DEV=y
1388# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1389# CONFIG_RTC_DRV_TEST is not set
1390
1391#
1392# I2C RTC drivers
1393#
1394# CONFIG_RTC_DRV_DS1307 is not set
1395# CONFIG_RTC_DRV_DS1374 is not set
1396# CONFIG_RTC_DRV_DS1672 is not set
1397# CONFIG_RTC_DRV_MAX6900 is not set
1398# CONFIG_RTC_DRV_RS5C372 is not set
1399# CONFIG_RTC_DRV_ISL1208 is not set
1400# CONFIG_RTC_DRV_X1205 is not set
1401# CONFIG_RTC_DRV_PCF8563 is not set
1402# CONFIG_RTC_DRV_PCF8583 is not set
1403# CONFIG_RTC_DRV_M41T80 is not set
1404# CONFIG_RTC_DRV_S35390A is not set
1405# CONFIG_RTC_DRV_FM3130 is not set
1406# CONFIG_RTC_DRV_RX8581 is not set
1407# CONFIG_RTC_DRV_RX8025 is not set
1408
1409#
1410# SPI RTC drivers
1411#
1412# CONFIG_RTC_DRV_M41T94 is not set
1413# CONFIG_RTC_DRV_DS1305 is not set
1414# CONFIG_RTC_DRV_DS1390 is not set
1415# CONFIG_RTC_DRV_MAX6902 is not set
1416# CONFIG_RTC_DRV_R9701 is not set
1417# CONFIG_RTC_DRV_RS5C348 is not set
1418# CONFIG_RTC_DRV_DS3234 is not set
1419# CONFIG_RTC_DRV_PCF2123 is not set
1420
1421#
1422# Platform RTC drivers
1423#
1424# CONFIG_RTC_DRV_DS1286 is not set
1425# CONFIG_RTC_DRV_DS1511 is not set
1426# CONFIG_RTC_DRV_DS1553 is not set
1427# CONFIG_RTC_DRV_DS1742 is not set
1428# CONFIG_RTC_DRV_STK17TA8 is not set
1429# CONFIG_RTC_DRV_M48T86 is not set
1430# CONFIG_RTC_DRV_M48T35 is not set
1431# CONFIG_RTC_DRV_M48T59 is not set
1432# CONFIG_RTC_DRV_BQ4802 is not set
1433# CONFIG_RTC_DRV_V3020 is not set
1434
1435#
1436# on-CPU RTC drivers
1437#
1438CONFIG_RTC_DRV_BFIN=y
1439# CONFIG_DMADEVICES is not set
1440# CONFIG_AUXDISPLAY is not set
1441# CONFIG_UIO is not set
1442
1443#
1444# TI VLYNQ
1445#
1446# CONFIG_STAGING is not set
1447
1448#
1449# Firmware Drivers
1450#
1451# CONFIG_FIRMWARE_MEMMAP is not set
1452# CONFIG_SIGMA is not set
1453
1454#
1455# File systems
1456#
1457CONFIG_EXT2_FS=m
1458# CONFIG_EXT2_FS_XATTR is not set
1459# CONFIG_EXT3_FS is not set
1460# CONFIG_EXT4_FS is not set
1461# CONFIG_REISERFS_FS is not set
1462# CONFIG_JFS_FS is not set
1463# CONFIG_FS_POSIX_ACL is not set
1464# CONFIG_XFS_FS is not set
1465# CONFIG_GFS2_FS is not set
1466# CONFIG_OCFS2_FS is not set
1467# CONFIG_BTRFS_FS is not set
1468# CONFIG_NILFS2_FS is not set
1469CONFIG_FILE_LOCKING=y
1470CONFIG_FSNOTIFY=y
1471# CONFIG_DNOTIFY is not set
1472CONFIG_INOTIFY=y
1473CONFIG_INOTIFY_USER=y
1474# CONFIG_QUOTA is not set
1475# CONFIG_AUTOFS_FS is not set
1476# CONFIG_AUTOFS4_FS is not set
1477# CONFIG_FUSE_FS is not set
1478
1479#
1480# Caches
1481#
1482# CONFIG_FSCACHE is not set
1483
1484#
1485# CD-ROM/DVD Filesystems
1486#
1487CONFIG_ISO9660_FS=m
1488CONFIG_JOLIET=y
1489# CONFIG_ZISOFS is not set
1490CONFIG_UDF_FS=m
1491CONFIG_UDF_NLS=y
1492
1493#
1494# DOS/FAT/NT Filesystems
1495#
1496CONFIG_FAT_FS=m
1497# CONFIG_MSDOS_FS is not set
1498CONFIG_VFAT_FS=m
1499CONFIG_FAT_DEFAULT_CODEPAGE=437
1500CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1501# CONFIG_NTFS_FS is not set
1502
1503#
1504# Pseudo filesystems
1505#
1506CONFIG_PROC_FS=y
1507CONFIG_PROC_SYSCTL=y
1508CONFIG_SYSFS=y
1509# CONFIG_HUGETLB_PAGE is not set
1510# CONFIG_CONFIGFS_FS is not set
1511CONFIG_MISC_FILESYSTEMS=y
1512# CONFIG_ADFS_FS is not set
1513# CONFIG_AFFS_FS is not set
1514# CONFIG_HFS_FS is not set
1515# CONFIG_HFSPLUS_FS is not set
1516# CONFIG_BEFS_FS is not set
1517# CONFIG_BFS_FS is not set
1518# CONFIG_EFS_FS is not set
1519CONFIG_JFFS2_FS=m
1520CONFIG_JFFS2_FS_DEBUG=0
1521CONFIG_JFFS2_FS_WRITEBUFFER=y
1522# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1523# CONFIG_JFFS2_SUMMARY is not set
1524# CONFIG_JFFS2_FS_XATTR is not set
1525# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1526CONFIG_JFFS2_ZLIB=y
1527# CONFIG_JFFS2_LZO is not set
1528CONFIG_JFFS2_RTIME=y
1529# CONFIG_JFFS2_RUBIN is not set
1530# CONFIG_CRAMFS is not set
1531# CONFIG_SQUASHFS is not set
1532# CONFIG_VXFS_FS is not set
1533# CONFIG_MINIX_FS is not set
1534# CONFIG_OMFS_FS is not set
1535# CONFIG_HPFS_FS is not set
1536# CONFIG_QNX4FS_FS is not set
1537# CONFIG_ROMFS_FS is not set
1538# CONFIG_SYSV_FS is not set
1539# CONFIG_UFS_FS is not set
1540CONFIG_NETWORK_FILESYSTEMS=y
1541CONFIG_NFS_FS=m
1542CONFIG_NFS_V3=y
1543# CONFIG_NFS_V3_ACL is not set
1544# CONFIG_NFS_V4 is not set
1545# CONFIG_NFSD is not set
1546CONFIG_LOCKD=m
1547CONFIG_LOCKD_V4=y
1548CONFIG_NFS_COMMON=y
1549CONFIG_SUNRPC=m
1550# CONFIG_RPCSEC_GSS_KRB5 is not set
1551# CONFIG_RPCSEC_GSS_SPKM3 is not set
1552CONFIG_SMB_FS=m
1553# CONFIG_SMB_NLS_DEFAULT is not set
1554# CONFIG_CIFS is not set
1555# CONFIG_NCP_FS is not set
1556# CONFIG_CODA_FS is not set
1557# CONFIG_AFS_FS is not set
1558
1559#
1560# Partition Types
1561#
1562# CONFIG_PARTITION_ADVANCED is not set
1563CONFIG_MSDOS_PARTITION=y
1564CONFIG_NLS=y
1565CONFIG_NLS_DEFAULT="iso8859-1"
1566CONFIG_NLS_CODEPAGE_437=m
1567# CONFIG_NLS_CODEPAGE_737 is not set
1568# CONFIG_NLS_CODEPAGE_775 is not set
1569# CONFIG_NLS_CODEPAGE_850 is not set
1570# CONFIG_NLS_CODEPAGE_852 is not set
1571# CONFIG_NLS_CODEPAGE_855 is not set
1572# CONFIG_NLS_CODEPAGE_857 is not set
1573# CONFIG_NLS_CODEPAGE_860 is not set
1574# CONFIG_NLS_CODEPAGE_861 is not set
1575# CONFIG_NLS_CODEPAGE_862 is not set
1576# CONFIG_NLS_CODEPAGE_863 is not set
1577# CONFIG_NLS_CODEPAGE_864 is not set
1578# CONFIG_NLS_CODEPAGE_865 is not set
1579# CONFIG_NLS_CODEPAGE_866 is not set
1580# CONFIG_NLS_CODEPAGE_869 is not set
1581CONFIG_NLS_CODEPAGE_936=m
1582# CONFIG_NLS_CODEPAGE_950 is not set
1583# CONFIG_NLS_CODEPAGE_932 is not set
1584# CONFIG_NLS_CODEPAGE_949 is not set
1585# CONFIG_NLS_CODEPAGE_874 is not set
1586# CONFIG_NLS_ISO8859_8 is not set
1587# CONFIG_NLS_CODEPAGE_1250 is not set
1588# CONFIG_NLS_CODEPAGE_1251 is not set
1589# CONFIG_NLS_ASCII is not set
1590CONFIG_NLS_ISO8859_1=m
1591# CONFIG_NLS_ISO8859_2 is not set
1592# CONFIG_NLS_ISO8859_3 is not set
1593# CONFIG_NLS_ISO8859_4 is not set
1594# CONFIG_NLS_ISO8859_5 is not set
1595# CONFIG_NLS_ISO8859_6 is not set
1596# CONFIG_NLS_ISO8859_7 is not set
1597# CONFIG_NLS_ISO8859_9 is not set
1598# CONFIG_NLS_ISO8859_13 is not set
1599# CONFIG_NLS_ISO8859_14 is not set
1600# CONFIG_NLS_ISO8859_15 is not set
1601# CONFIG_NLS_KOI8_R is not set
1602# CONFIG_NLS_KOI8_U is not set
1603CONFIG_NLS_UTF8=m
1604# CONFIG_DLM is not set
1605
1606#
1607# Kernel hacking
1608#
1609# CONFIG_PRINTK_TIME is not set
1610CONFIG_ENABLE_WARN_DEPRECATED=y
1611CONFIG_ENABLE_MUST_CHECK=y
1612CONFIG_FRAME_WARN=1024
1613# CONFIG_MAGIC_SYSRQ is not set
1614# CONFIG_STRIP_ASM_SYMS is not set
1615# CONFIG_UNUSED_SYMBOLS is not set
1616CONFIG_DEBUG_FS=y
1617# CONFIG_HEADERS_CHECK is not set
1618CONFIG_DEBUG_SECTION_MISMATCH=y
1619CONFIG_DEBUG_KERNEL=y
1620CONFIG_DEBUG_SHIRQ=y
1621CONFIG_DETECT_SOFTLOCKUP=y
1622# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1623CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1624CONFIG_DETECT_HUNG_TASK=y
1625# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1626CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1627CONFIG_SCHED_DEBUG=y
1628# CONFIG_SCHEDSTATS is not set
1629# CONFIG_TIMER_STATS is not set
1630# CONFIG_DEBUG_OBJECTS is not set
1631# CONFIG_DEBUG_SLAB is not set
1632# CONFIG_DEBUG_SPINLOCK is not set
1633# CONFIG_DEBUG_MUTEXES is not set
1634# CONFIG_DEBUG_LOCK_ALLOC is not set
1635# CONFIG_PROVE_LOCKING is not set
1636# CONFIG_LOCK_STAT is not set
1637# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1638# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1639# CONFIG_DEBUG_KOBJECT is not set
1640CONFIG_DEBUG_BUGVERBOSE=y
1641CONFIG_DEBUG_INFO=y
1642# CONFIG_DEBUG_VM is not set
1643# CONFIG_DEBUG_NOMMU_REGIONS is not set
1644# CONFIG_DEBUG_WRITECOUNT is not set
1645# CONFIG_DEBUG_MEMORY_INIT is not set
1646# CONFIG_DEBUG_LIST is not set
1647# CONFIG_DEBUG_SG is not set
1648# CONFIG_DEBUG_NOTIFIERS is not set
1649# CONFIG_DEBUG_CREDENTIALS is not set
1650# CONFIG_FRAME_POINTER is not set
1651# CONFIG_BOOT_PRINTK_DELAY is not set
1652# CONFIG_RCU_TORTURE_TEST is not set
1653# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1654# CONFIG_BACKTRACE_SELF_TEST is not set
1655# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1656# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1657# CONFIG_FAULT_INJECTION is not set
1658# CONFIG_PAGE_POISONING is not set
1659CONFIG_HAVE_FUNCTION_TRACER=y
1660CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1661CONFIG_TRACING_SUPPORT=y
1662# CONFIG_FTRACE is not set
1663# CONFIG_BRANCH_PROFILE_NONE is not set
1664# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1665# CONFIG_PROFILE_ALL_BRANCHES is not set
1666# CONFIG_DYNAMIC_DEBUG is not set
1667# CONFIG_SAMPLES is not set
1668CONFIG_HAVE_ARCH_KGDB=y
1669# CONFIG_KGDB is not set
1670# CONFIG_DEBUG_STACKOVERFLOW is not set
1671# CONFIG_DEBUG_STACK_USAGE is not set
1672CONFIG_DEBUG_VERBOSE=y
1673CONFIG_DEBUG_MMRS=y
1674CONFIG_DEBUG_HWERR=y
1675CONFIG_EXACT_HWERR=y
1676CONFIG_DEBUG_DOUBLEFAULT=y
1677CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1678# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1679# CONFIG_DEBUG_ICACHE_CHECK is not set
1680CONFIG_DEBUG_HUNT_FOR_ZERO=y
1681CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1682# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1683CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1684# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1685CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1686# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1687CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1688CONFIG_EARLY_PRINTK=y
1689CONFIG_CPLB_INFO=y
1690CONFIG_ACCESS_CHECK=y
1691# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1692
1693#
1694# Security options
1695#
1696# CONFIG_KEYS is not set
1697CONFIG_SECURITY=y
1698# CONFIG_SECURITYFS is not set
1699# CONFIG_SECURITY_NETWORK is not set
1700# CONFIG_SECURITY_PATH is not set
1701# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1702# CONFIG_SECURITY_ROOTPLUG is not set
1703# CONFIG_SECURITY_TOMOYO is not set
1704CONFIG_CRYPTO=y
1705
1706#
1707# Crypto core or helper
1708#
1709# CONFIG_CRYPTO_MANAGER is not set
1710# CONFIG_CRYPTO_MANAGER2 is not set
1711# CONFIG_CRYPTO_GF128MUL is not set
1712# CONFIG_CRYPTO_NULL is not set
1713# CONFIG_CRYPTO_CRYPTD is not set
1714# CONFIG_CRYPTO_AUTHENC is not set
1715# CONFIG_CRYPTO_TEST is not set
1716
1717#
1718# Authenticated Encryption with Associated Data
1719#
1720# CONFIG_CRYPTO_CCM is not set
1721# CONFIG_CRYPTO_GCM is not set
1722# CONFIG_CRYPTO_SEQIV is not set
1723
1724#
1725# Block modes
1726#
1727# CONFIG_CRYPTO_CBC is not set
1728# CONFIG_CRYPTO_CTR is not set
1729# CONFIG_CRYPTO_CTS is not set
1730# CONFIG_CRYPTO_ECB is not set
1731# CONFIG_CRYPTO_LRW is not set
1732# CONFIG_CRYPTO_PCBC is not set
1733# CONFIG_CRYPTO_XTS is not set
1734
1735#
1736# Hash modes
1737#
1738# CONFIG_CRYPTO_HMAC is not set
1739# CONFIG_CRYPTO_XCBC is not set
1740# CONFIG_CRYPTO_VMAC is not set
1741
1742#
1743# Digest
1744#
1745# CONFIG_CRYPTO_CRC32C is not set
1746# CONFIG_CRYPTO_GHASH is not set
1747# CONFIG_CRYPTO_MD4 is not set
1748# CONFIG_CRYPTO_MD5 is not set
1749# CONFIG_CRYPTO_MICHAEL_MIC is not set
1750# CONFIG_CRYPTO_RMD128 is not set
1751# CONFIG_CRYPTO_RMD160 is not set
1752# CONFIG_CRYPTO_RMD256 is not set
1753# CONFIG_CRYPTO_RMD320 is not set
1754# CONFIG_CRYPTO_SHA1 is not set
1755# CONFIG_CRYPTO_SHA256 is not set
1756# CONFIG_CRYPTO_SHA512 is not set
1757# CONFIG_CRYPTO_TGR192 is not set
1758# CONFIG_CRYPTO_WP512 is not set
1759
1760#
1761# Ciphers
1762#
1763# CONFIG_CRYPTO_AES is not set
1764# CONFIG_CRYPTO_ANUBIS is not set
1765# CONFIG_CRYPTO_ARC4 is not set
1766# CONFIG_CRYPTO_BLOWFISH is not set
1767# CONFIG_CRYPTO_CAMELLIA is not set
1768# CONFIG_CRYPTO_CAST5 is not set
1769# CONFIG_CRYPTO_CAST6 is not set
1770# CONFIG_CRYPTO_DES is not set
1771# CONFIG_CRYPTO_FCRYPT is not set
1772# CONFIG_CRYPTO_KHAZAD is not set
1773# CONFIG_CRYPTO_SALSA20 is not set
1774# CONFIG_CRYPTO_SEED is not set
1775# CONFIG_CRYPTO_SERPENT is not set
1776# CONFIG_CRYPTO_TEA is not set
1777# CONFIG_CRYPTO_TWOFISH is not set
1778
1779#
1780# Compression
1781#
1782# CONFIG_CRYPTO_DEFLATE is not set
1783# CONFIG_CRYPTO_ZLIB is not set
1784# CONFIG_CRYPTO_LZO is not set
1785
1786#
1787# Random Number Generation
1788#
1789# CONFIG_CRYPTO_ANSI_CPRNG is not set
1790CONFIG_CRYPTO_HW=y
1791# CONFIG_BINARY_PRINTF is not set
1792
1793#
1794# Library routines
1795#
1796CONFIG_BITREVERSE=y
1797CONFIG_GENERIC_FIND_LAST_BIT=y
1798CONFIG_CRC_CCITT=m
1799# CONFIG_CRC16 is not set
1800# CONFIG_CRC_T10DIF is not set
1801CONFIG_CRC_ITU_T=m
1802CONFIG_CRC32=y
1803# CONFIG_CRC7 is not set
1804# CONFIG_LIBCRC32C is not set
1805CONFIG_ZLIB_INFLATE=y
1806CONFIG_ZLIB_DEFLATE=m
1807CONFIG_DECOMPRESS_GZIP=y
1808CONFIG_HAS_IOMEM=y
1809CONFIG_HAS_IOPORT=y
1810CONFIG_HAS_DMA=y
1811CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 6d1a623fb149..edbb44d26bbf 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102# CONFIG_DEFAULT_NOOP is not set 131# CONFIG_DEFAULT_NOOP is not set
103CONFIG_DEFAULT_IOSCHED="anticipatory" 132CONFIG_DEFAULT_IOSCHED="anticipatory"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,29 +160,28 @@ CONFIG_BF527=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=0 174CONFIG_BF_REV_MIN=0
147CONFIG_BF_REV_MAX=2 175CONFIG_BF_REV_MAX=2
148# CONFIG_BF_REV_0_0 is not set 176# CONFIG_BF_REV_0_0 is not set
149# CONFIG_BF_REV_0_1 is not set 177CONFIG_BF_REV_0_1=y
150CONFIG_BF_REV_0_2=y 178# CONFIG_BF_REV_0_2 is not set
151# CONFIG_BF_REV_0_3 is not set 179# CONFIG_BF_REV_0_3 is not set
152# CONFIG_BF_REV_0_4 is not set 180# CONFIG_BF_REV_0_4 is not set
153# CONFIG_BF_REV_0_5 is not set 181# CONFIG_BF_REV_0_5 is not set
154# CONFIG_BF_REV_0_6 is not set 182# CONFIG_BF_REV_0_6 is not set
155# CONFIG_BF_REV_ANY is not set 183# CONFIG_BF_REV_ANY is not set
156# CONFIG_BF_REV_NONE is not set 184# CONFIG_BF_REV_NONE is not set
157CONFIG_BF52x=y
158CONFIG_MEM_MT48LC32M16A2TG_75=y 185CONFIG_MEM_MT48LC32M16A2TG_75=y
159CONFIG_IRQ_PLL_WAKEUP=7 186CONFIG_IRQ_PLL_WAKEUP=7
160CONFIG_IRQ_DMA0_ERROR=7 187CONFIG_IRQ_DMA0_ERROR=7
@@ -200,7 +227,9 @@ CONFIG_IRQ_MEM_DMA1=13
200CONFIG_IRQ_WATCH=13 227CONFIG_IRQ_WATCH=13
201CONFIG_IRQ_PORTF_INTA=13 228CONFIG_IRQ_PORTF_INTA=13
202CONFIG_IRQ_PORTF_INTB=13 229CONFIG_IRQ_PORTF_INTB=13
230CONFIG_BF52x=y
203CONFIG_BFIN527_EZKIT=y 231CONFIG_BFIN527_EZKIT=y
232# CONFIG_BFIN527_EZKIT_V2 is not set
204# CONFIG_BFIN527_BLUETECHNIX_CM is not set 233# CONFIG_BFIN527_BLUETECHNIX_CM is not set
205# CONFIG_BFIN526_EZBRD is not set 234# CONFIG_BFIN526_EZBRD is not set
206 235
@@ -318,7 +347,6 @@ CONFIG_FLATMEM=y
318CONFIG_FLAT_NODE_MEM_MAP=y 347CONFIG_FLAT_NODE_MEM_MAP=y
319CONFIG_PAGEFLAGS_EXTENDED=y 348CONFIG_PAGEFLAGS_EXTENDED=y
320CONFIG_SPLIT_PTLOCK_CPUS=4 349CONFIG_SPLIT_PTLOCK_CPUS=4
321# CONFIG_RESOURCES_64BIT is not set
322# CONFIG_PHYS_ADDR_T_64BIT is not set 350# CONFIG_PHYS_ADDR_T_64BIT is not set
323CONFIG_ZONE_DMA_FLAG=1 351CONFIG_ZONE_DMA_FLAG=1
324CONFIG_VIRT_TO_BUS=y 352CONFIG_VIRT_TO_BUS=y
@@ -327,16 +355,18 @@ CONFIG_BFIN_GPTIMERS=y
327# CONFIG_DMA_UNCACHED_4M is not set 355# CONFIG_DMA_UNCACHED_4M is not set
328# CONFIG_DMA_UNCACHED_2M is not set 356# CONFIG_DMA_UNCACHED_2M is not set
329CONFIG_DMA_UNCACHED_1M=y 357CONFIG_DMA_UNCACHED_1M=y
358# CONFIG_DMA_UNCACHED_512K is not set
359# CONFIG_DMA_UNCACHED_256K is not set
360# CONFIG_DMA_UNCACHED_128K is not set
330# CONFIG_DMA_UNCACHED_NONE is not set 361# CONFIG_DMA_UNCACHED_NONE is not set
331 362
332# 363#
333# Cache Support 364# Cache Support
334# 365#
335CONFIG_BFIN_ICACHE=y 366CONFIG_BFIN_ICACHE=y
336# CONFIG_BFIN_ICACHE_LOCK is not set 367CONFIG_BFIN_EXTMEM_ICACHEABLE=y
337CONFIG_BFIN_DCACHE=y 368CONFIG_BFIN_DCACHE=y
338# CONFIG_BFIN_DCACHE_BANKA is not set 369# CONFIG_BFIN_DCACHE_BANKA is not set
339CONFIG_BFIN_EXTMEM_ICACHEABLE=y
340CONFIG_BFIN_EXTMEM_DCACHEABLE=y 370CONFIG_BFIN_EXTMEM_DCACHEABLE=y
341CONFIG_BFIN_EXTMEM_WRITEBACK=y 371CONFIG_BFIN_EXTMEM_WRITEBACK=y
342# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 372# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -347,7 +377,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
347# CONFIG_MPU is not set 377# CONFIG_MPU is not set
348 378
349# 379#
350# Asynchonous Memory Configuration 380# Asynchronous Memory Configuration
351# 381#
352 382
353# 383#
@@ -403,11 +433,6 @@ CONFIG_NET=y
403CONFIG_PACKET=y 433CONFIG_PACKET=y
404# CONFIG_PACKET_MMAP is not set 434# CONFIG_PACKET_MMAP is not set
405CONFIG_UNIX=y 435CONFIG_UNIX=y
406CONFIG_XFRM=y
407# CONFIG_XFRM_USER is not set
408# CONFIG_XFRM_SUB_POLICY is not set
409# CONFIG_XFRM_MIGRATE is not set
410# CONFIG_XFRM_STATISTICS is not set
411# CONFIG_NET_KEY is not set 436# CONFIG_NET_KEY is not set
412CONFIG_INET=y 437CONFIG_INET=y
413# CONFIG_IP_MULTICAST is not set 438# CONFIG_IP_MULTICAST is not set
@@ -431,7 +456,6 @@ CONFIG_IP_PNP=y
431# CONFIG_INET_XFRM_MODE_BEET is not set 456# CONFIG_INET_XFRM_MODE_BEET is not set
432# CONFIG_INET_LRO is not set 457# CONFIG_INET_LRO is not set
433# CONFIG_INET_DIAG is not set 458# CONFIG_INET_DIAG is not set
434CONFIG_INET_TCP_DIAG=y
435# CONFIG_TCP_CONG_ADVANCED is not set 459# CONFIG_TCP_CONG_ADVANCED is not set
436CONFIG_TCP_CONG_CUBIC=y 460CONFIG_TCP_CONG_CUBIC=y
437CONFIG_DEFAULT_TCP_CONG="cubic" 461CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -442,6 +466,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
442# CONFIG_NETFILTER is not set 466# CONFIG_NETFILTER is not set
443# CONFIG_IP_DCCP is not set 467# CONFIG_IP_DCCP is not set
444# CONFIG_IP_SCTP is not set 468# CONFIG_IP_SCTP is not set
469# CONFIG_RDS is not set
445# CONFIG_TIPC is not set 470# CONFIG_TIPC is not set
446# CONFIG_ATM is not set 471# CONFIG_ATM is not set
447# CONFIG_BRIDGE is not set 472# CONFIG_BRIDGE is not set
@@ -455,7 +480,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
455# CONFIG_LAPB is not set 480# CONFIG_LAPB is not set
456# CONFIG_ECONET is not set 481# CONFIG_ECONET is not set
457# CONFIG_WAN_ROUTER is not set 482# CONFIG_WAN_ROUTER is not set
483# CONFIG_PHONET is not set
484# CONFIG_IEEE802154 is not set
458# CONFIG_NET_SCHED is not set 485# CONFIG_NET_SCHED is not set
486# CONFIG_DCB is not set
459 487
460# 488#
461# Network testing 489# Network testing
@@ -508,13 +536,8 @@ CONFIG_SIR_BFIN_DMA=y
508# CONFIG_MCS_FIR is not set 536# CONFIG_MCS_FIR is not set
509# CONFIG_BT is not set 537# CONFIG_BT is not set
510# CONFIG_AF_RXRPC is not set 538# CONFIG_AF_RXRPC is not set
511# CONFIG_PHONET is not set 539# CONFIG_WIRELESS is not set
512CONFIG_WIRELESS=y 540# CONFIG_WIMAX is not set
513# CONFIG_CFG80211 is not set
514CONFIG_WIRELESS_OLD_REGULATORY=y
515# CONFIG_WIRELESS_EXT is not set
516# CONFIG_MAC80211 is not set
517# CONFIG_IEEE80211 is not set
518# CONFIG_RFKILL is not set 541# CONFIG_RFKILL is not set
519# CONFIG_NET_9P is not set 542# CONFIG_NET_9P is not set
520 543
@@ -535,6 +558,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
535# CONFIG_CONNECTOR is not set 558# CONFIG_CONNECTOR is not set
536CONFIG_MTD=y 559CONFIG_MTD=y
537# CONFIG_MTD_DEBUG is not set 560# CONFIG_MTD_DEBUG is not set
561# CONFIG_MTD_TESTS is not set
538# CONFIG_MTD_CONCAT is not set 562# CONFIG_MTD_CONCAT is not set
539CONFIG_MTD_PARTITIONS=y 563CONFIG_MTD_PARTITIONS=y
540# CONFIG_MTD_REDBOOT_PARTS is not set 564# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -593,6 +617,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
593# CONFIG_MTD_DATAFLASH is not set 617# CONFIG_MTD_DATAFLASH is not set
594CONFIG_MTD_M25P80=y 618CONFIG_MTD_M25P80=y
595CONFIG_M25PXX_USE_FAST_READ=y 619CONFIG_M25PXX_USE_FAST_READ=y
620# CONFIG_MTD_SST25L is not set
596# CONFIG_MTD_SLRAM is not set 621# CONFIG_MTD_SLRAM is not set
597# CONFIG_MTD_PHRAM is not set 622# CONFIG_MTD_PHRAM is not set
598# CONFIG_MTD_MTDRAM is not set 623# CONFIG_MTD_MTDRAM is not set
@@ -608,11 +633,6 @@ CONFIG_MTD_NAND=m
608# CONFIG_MTD_NAND_VERIFY_WRITE is not set 633# CONFIG_MTD_NAND_VERIFY_WRITE is not set
609# CONFIG_MTD_NAND_ECC_SMC is not set 634# CONFIG_MTD_NAND_ECC_SMC is not set
610# CONFIG_MTD_NAND_MUSEUM_IDS is not set 635# CONFIG_MTD_NAND_MUSEUM_IDS is not set
611CONFIG_MTD_NAND_BFIN=m
612CONFIG_BFIN_NAND_BASE=0x20212000
613CONFIG_BFIN_NAND_CLE=2
614CONFIG_BFIN_NAND_ALE=1
615CONFIG_BFIN_NAND_READY=3
616CONFIG_MTD_NAND_IDS=m 636CONFIG_MTD_NAND_IDS=m
617# CONFIG_MTD_NAND_BF5XX is not set 637# CONFIG_MTD_NAND_BF5XX is not set
618# CONFIG_MTD_NAND_DISKONCHIP is not set 638# CONFIG_MTD_NAND_DISKONCHIP is not set
@@ -622,6 +642,11 @@ CONFIG_MTD_NAND_IDS=m
622# CONFIG_MTD_ONENAND is not set 642# CONFIG_MTD_ONENAND is not set
623 643
624# 644#
645# LPDDR flash memory drivers
646#
647# CONFIG_MTD_LPDDR is not set
648
649#
625# UBI - Unsorted block images 650# UBI - Unsorted block images
626# 651#
627# CONFIG_MTD_UBI is not set 652# CONFIG_MTD_UBI is not set
@@ -639,10 +664,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
639# CONFIG_ATA_OVER_ETH is not set 664# CONFIG_ATA_OVER_ETH is not set
640# CONFIG_BLK_DEV_HD is not set 665# CONFIG_BLK_DEV_HD is not set
641CONFIG_MISC_DEVICES=y 666CONFIG_MISC_DEVICES=y
642# CONFIG_EEPROM_93CX6 is not set 667# CONFIG_AD525X_DPOT is not set
643# CONFIG_ICS932S401 is not set 668# CONFIG_ICS932S401 is not set
644# CONFIG_ENCLOSURE_SERVICES is not set 669# CONFIG_ENCLOSURE_SERVICES is not set
670# CONFIG_ISL29003 is not set
645# CONFIG_C2PORT is not set 671# CONFIG_C2PORT is not set
672
673#
674# EEPROM support
675#
676# CONFIG_EEPROM_AT24 is not set
677# CONFIG_EEPROM_AT25 is not set
678# CONFIG_EEPROM_LEGACY is not set
679# CONFIG_EEPROM_MAX6875 is not set
680# CONFIG_EEPROM_93CX6 is not set
646CONFIG_HAVE_IDE=y 681CONFIG_HAVE_IDE=y
647# CONFIG_IDE is not set 682# CONFIG_IDE is not set
648 683
@@ -666,10 +701,6 @@ CONFIG_BLK_DEV_SR=m
666# CONFIG_BLK_DEV_SR_VENDOR is not set 701# CONFIG_BLK_DEV_SR_VENDOR is not set
667# CONFIG_CHR_DEV_SG is not set 702# CONFIG_CHR_DEV_SG is not set
668# CONFIG_CHR_DEV_SCH is not set 703# CONFIG_CHR_DEV_SCH is not set
669
670#
671# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
672#
673# CONFIG_SCSI_MULTI_LUN is not set 704# CONFIG_SCSI_MULTI_LUN is not set
674# CONFIG_SCSI_CONSTANTS is not set 705# CONFIG_SCSI_CONSTANTS is not set
675# CONFIG_SCSI_LOGGING is not set 706# CONFIG_SCSI_LOGGING is not set
@@ -686,6 +717,7 @@ CONFIG_SCSI_WAIT_SCAN=m
686# CONFIG_SCSI_SRP_ATTRS is not set 717# CONFIG_SCSI_SRP_ATTRS is not set
687# CONFIG_SCSI_LOWLEVEL is not set 718# CONFIG_SCSI_LOWLEVEL is not set
688# CONFIG_SCSI_DH is not set 719# CONFIG_SCSI_DH is not set
720# CONFIG_SCSI_OSD_INITIATOR is not set
689# CONFIG_ATA is not set 721# CONFIG_ATA is not set
690# CONFIG_MD is not set 722# CONFIG_MD is not set
691CONFIG_NETDEVICES=y 723CONFIG_NETDEVICES=y
@@ -710,6 +742,9 @@ CONFIG_PHYLIB=y
710# CONFIG_BROADCOM_PHY is not set 742# CONFIG_BROADCOM_PHY is not set
711# CONFIG_ICPLUS_PHY is not set 743# CONFIG_ICPLUS_PHY is not set
712# CONFIG_REALTEK_PHY is not set 744# CONFIG_REALTEK_PHY is not set
745# CONFIG_NATIONAL_PHY is not set
746# CONFIG_STE10XP is not set
747# CONFIG_LSI_ET1011C_PHY is not set
713# CONFIG_FIXED_PHY is not set 748# CONFIG_FIXED_PHY is not set
714# CONFIG_MDIO_BITBANG is not set 749# CONFIG_MDIO_BITBANG is not set
715CONFIG_NET_ETHERNET=y 750CONFIG_NET_ETHERNET=y
@@ -720,9 +755,12 @@ CONFIG_BFIN_TX_DESC_NUM=10
720CONFIG_BFIN_RX_DESC_NUM=20 755CONFIG_BFIN_RX_DESC_NUM=20
721CONFIG_BFIN_MAC_RMII=y 756CONFIG_BFIN_MAC_RMII=y
722# CONFIG_SMC91X is not set 757# CONFIG_SMC91X is not set
723# CONFIG_SMSC911X is not set
724# CONFIG_DM9000 is not set 758# CONFIG_DM9000 is not set
725# CONFIG_ENC28J60 is not set 759# CONFIG_ENC28J60 is not set
760# CONFIG_ETHOC is not set
761# CONFIG_SMSC911X is not set
762# CONFIG_DNET is not set
763# CONFIG_ADF702X is not set
726# CONFIG_IBM_NEW_EMAC_ZMII is not set 764# CONFIG_IBM_NEW_EMAC_ZMII is not set
727# CONFIG_IBM_NEW_EMAC_RGMII is not set 765# CONFIG_IBM_NEW_EMAC_RGMII is not set
728# CONFIG_IBM_NEW_EMAC_TAH is not set 766# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -731,15 +769,16 @@ CONFIG_BFIN_MAC_RMII=y
731# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 769# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
732# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 770# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
733# CONFIG_B44 is not set 771# CONFIG_B44 is not set
772# CONFIG_KS8842 is not set
773# CONFIG_KS8851 is not set
774# CONFIG_KS8851_MLL is not set
734# CONFIG_NETDEV_1000 is not set 775# CONFIG_NETDEV_1000 is not set
735# CONFIG_NETDEV_10000 is not set 776# CONFIG_NETDEV_10000 is not set
777# CONFIG_WLAN is not set
736 778
737# 779#
738# Wireless LAN 780# Enable WiMAX (Networking options) to see the WiMAX drivers
739# 781#
740# CONFIG_WLAN_PRE80211 is not set
741# CONFIG_WLAN_80211 is not set
742# CONFIG_IWLWIFI_LEDS is not set
743 782
744# 783#
745# USB Network Adapters 784# USB Network Adapters
@@ -789,7 +828,11 @@ CONFIG_INPUT_MISC=y
789# CONFIG_INPUT_YEALINK is not set 828# CONFIG_INPUT_YEALINK is not set
790# CONFIG_INPUT_CM109 is not set 829# CONFIG_INPUT_CM109 is not set
791# CONFIG_INPUT_UINPUT is not set 830# CONFIG_INPUT_UINPUT is not set
792# CONFIG_CONFIG_INPUT_PCF8574 is not set 831# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
832# CONFIG_INPUT_BFIN_ROTARY is not set
833# CONFIG_INPUT_AD714X is not set
834# CONFIG_INPUT_ADXL34X is not set
835# CONFIG_INPUT_PCF8574 is not set
793 836
794# 837#
795# Hardware I/O ports 838# Hardware I/O ports
@@ -800,16 +843,13 @@ CONFIG_INPUT_MISC=y
800# 843#
801# Character devices 844# Character devices
802# 845#
803# CONFIG_AD9960 is not set
804CONFIG_BFIN_DMA_INTERFACE=m 846CONFIG_BFIN_DMA_INTERFACE=m
805# CONFIG_BFIN_PPI is not set 847# CONFIG_BFIN_PPI is not set
806# CONFIG_BFIN_PPIFCD is not set 848# CONFIG_BFIN_PPIFCD is not set
807# CONFIG_BFIN_SIMPLE_TIMER is not set 849# CONFIG_BFIN_SIMPLE_TIMER is not set
808# CONFIG_BFIN_SPI_ADC is not set 850# CONFIG_BFIN_SPI_ADC is not set
809CONFIG_BFIN_SPORT=m 851CONFIG_BFIN_SPORT=m
810# CONFIG_BFIN_TIMER_LATENCY is not set
811# CONFIG_BFIN_TWI_LCD is not set 852# CONFIG_BFIN_TWI_LCD is not set
812CONFIG_SIMPLE_GPIO=m
813CONFIG_VT=y 853CONFIG_VT=y
814CONFIG_CONSOLE_TRANSLATIONS=y 854CONFIG_CONSOLE_TRANSLATIONS=y
815CONFIG_VT_CONSOLE=y 855CONFIG_VT_CONSOLE=y
@@ -827,6 +867,7 @@ CONFIG_BFIN_JTAG_COMM=m
827# 867#
828# Non-8250 serial port support 868# Non-8250 serial port support
829# 869#
870# CONFIG_SERIAL_MAX3100 is not set
830CONFIG_SERIAL_BFIN=y 871CONFIG_SERIAL_BFIN=y
831CONFIG_SERIAL_BFIN_CONSOLE=y 872CONFIG_SERIAL_BFIN_CONSOLE=y
832CONFIG_SERIAL_BFIN_DMA=y 873CONFIG_SERIAL_BFIN_DMA=y
@@ -838,14 +879,10 @@ CONFIG_SERIAL_CORE=y
838CONFIG_SERIAL_CORE_CONSOLE=y 879CONFIG_SERIAL_CORE_CONSOLE=y
839# CONFIG_SERIAL_BFIN_SPORT is not set 880# CONFIG_SERIAL_BFIN_SPORT is not set
840CONFIG_UNIX98_PTYS=y 881CONFIG_UNIX98_PTYS=y
882# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
841# CONFIG_LEGACY_PTYS is not set 883# CONFIG_LEGACY_PTYS is not set
842CONFIG_BFIN_OTP=y 884CONFIG_BFIN_OTP=y
843# CONFIG_BFIN_OTP_WRITE_ENABLE is not set 885# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
844
845#
846# CAN, the car bus and industrial fieldbus
847#
848# CONFIG_CAN4LINUX is not set
849# CONFIG_IPMI_HANDLER is not set 886# CONFIG_IPMI_HANDLER is not set
850# CONFIG_HW_RANDOM is not set 887# CONFIG_HW_RANDOM is not set
851# CONFIG_R3964 is not set 888# CONFIG_R3964 is not set
@@ -853,6 +890,7 @@ CONFIG_BFIN_OTP=y
853# CONFIG_TCG_TPM is not set 890# CONFIG_TCG_TPM is not set
854CONFIG_I2C=y 891CONFIG_I2C=y
855CONFIG_I2C_BOARDINFO=y 892CONFIG_I2C_BOARDINFO=y
893CONFIG_I2C_COMPAT=y
856CONFIG_I2C_CHARDEV=m 894CONFIG_I2C_CHARDEV=m
857CONFIG_I2C_HELPER_AUTO=y 895CONFIG_I2C_HELPER_AUTO=y
858 896
@@ -886,14 +924,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
886# Miscellaneous I2C Chip support 924# Miscellaneous I2C Chip support
887# 925#
888# CONFIG_DS1682 is not set 926# CONFIG_DS1682 is not set
889# CONFIG_EEPROM_AT24 is not set
890# CONFIG_SENSORS_AD5252 is not set
891# CONFIG_EEPROM_LEGACY is not set
892# CONFIG_SENSORS_PCF8574 is not set
893# CONFIG_PCF8575 is not set
894# CONFIG_SENSORS_PCA9539 is not set
895# CONFIG_SENSORS_PCF8591 is not set
896# CONFIG_SENSORS_MAX6875 is not set
897# CONFIG_SENSORS_TSL2550 is not set 927# CONFIG_SENSORS_TSL2550 is not set
898# CONFIG_I2C_DEBUG_CORE is not set 928# CONFIG_I2C_DEBUG_CORE is not set
899# CONFIG_I2C_DEBUG_ALGO is not set 929# CONFIG_I2C_DEBUG_ALGO is not set
@@ -910,13 +940,18 @@ CONFIG_SPI_BFIN=y
910# CONFIG_SPI_BFIN_LOCK is not set 940# CONFIG_SPI_BFIN_LOCK is not set
911# CONFIG_SPI_BFIN_SPORT is not set 941# CONFIG_SPI_BFIN_SPORT is not set
912# CONFIG_SPI_BITBANG is not set 942# CONFIG_SPI_BITBANG is not set
943# CONFIG_SPI_GPIO is not set
913 944
914# 945#
915# SPI Protocol Masters 946# SPI Protocol Masters
916# 947#
917# CONFIG_EEPROM_AT25 is not set
918# CONFIG_SPI_SPIDEV is not set 948# CONFIG_SPI_SPIDEV is not set
919# CONFIG_SPI_TLE62X0 is not set 949# CONFIG_SPI_TLE62X0 is not set
950
951#
952# PPS support
953#
954# CONFIG_PPS is not set
920CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 955CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
921CONFIG_GPIOLIB=y 956CONFIG_GPIOLIB=y
922# CONFIG_DEBUG_GPIO is not set 957# CONFIG_DEBUG_GPIO is not set
@@ -932,6 +967,7 @@ CONFIG_GPIO_SYSFS=y
932# CONFIG_GPIO_MAX732X is not set 967# CONFIG_GPIO_MAX732X is not set
933# CONFIG_GPIO_PCA953X is not set 968# CONFIG_GPIO_PCA953X is not set
934# CONFIG_GPIO_PCF857X is not set 969# CONFIG_GPIO_PCF857X is not set
970# CONFIG_GPIO_ADP5588 is not set
935 971
936# 972#
937# PCI GPIO expanders: 973# PCI GPIO expanders:
@@ -942,11 +978,15 @@ CONFIG_GPIO_SYSFS=y
942# 978#
943# CONFIG_GPIO_MAX7301 is not set 979# CONFIG_GPIO_MAX7301 is not set
944# CONFIG_GPIO_MCP23S08 is not set 980# CONFIG_GPIO_MCP23S08 is not set
981# CONFIG_GPIO_MC33880 is not set
982
983#
984# AC97 GPIO expanders:
985#
945# CONFIG_W1 is not set 986# CONFIG_W1 is not set
946# CONFIG_POWER_SUPPLY is not set 987# CONFIG_POWER_SUPPLY is not set
947# CONFIG_HWMON is not set 988# CONFIG_HWMON is not set
948# CONFIG_THERMAL is not set 989# CONFIG_THERMAL is not set
949# CONFIG_THERMAL_HWMON is not set
950CONFIG_WATCHDOG=y 990CONFIG_WATCHDOG=y
951# CONFIG_WATCHDOG_NOWAYOUT is not set 991# CONFIG_WATCHDOG_NOWAYOUT is not set
952 992
@@ -973,28 +1013,21 @@ CONFIG_SSB_POSSIBLE=y
973# CONFIG_MFD_CORE is not set 1013# CONFIG_MFD_CORE is not set
974# CONFIG_MFD_SM501 is not set 1014# CONFIG_MFD_SM501 is not set
975# CONFIG_HTC_PASIC3 is not set 1015# CONFIG_HTC_PASIC3 is not set
1016# CONFIG_UCB1400_CORE is not set
1017# CONFIG_TPS65010 is not set
1018# CONFIG_TWL4030_CORE is not set
976# CONFIG_MFD_TMIO is not set 1019# CONFIG_MFD_TMIO is not set
977# CONFIG_PMIC_DA903X is not set 1020# CONFIG_PMIC_DA903X is not set
978# CONFIG_PMIC_ADP5520 is not set 1021# CONFIG_PMIC_ADP5520 is not set
979# CONFIG_MFD_WM8400 is not set 1022# CONFIG_MFD_WM8400 is not set
1023# CONFIG_MFD_WM831X is not set
980# CONFIG_MFD_WM8350_I2C is not set 1024# CONFIG_MFD_WM8350_I2C is not set
1025# CONFIG_MFD_PCF50633 is not set
1026# CONFIG_MFD_MC13783 is not set
1027# CONFIG_AB3100_CORE is not set
1028# CONFIG_EZX_PCAP is not set
981# CONFIG_REGULATOR is not set 1029# CONFIG_REGULATOR is not set
982 1030# CONFIG_MEDIA_SUPPORT is not set
983#
984# Multimedia devices
985#
986
987#
988# Multimedia core support
989#
990# CONFIG_VIDEO_DEV is not set
991# CONFIG_DVB_CORE is not set
992# CONFIG_VIDEO_MEDIA is not set
993
994#
995# Multimedia drivers
996#
997# CONFIG_DAB is not set
998 1031
999# 1032#
1000# Graphics support 1033# Graphics support
@@ -1030,15 +1063,18 @@ CONFIG_FB_BFIN_T350MCQB=y
1030# CONFIG_FB_VIRTUAL is not set 1063# CONFIG_FB_VIRTUAL is not set
1031# CONFIG_FB_METRONOME is not set 1064# CONFIG_FB_METRONOME is not set
1032# CONFIG_FB_MB862XX is not set 1065# CONFIG_FB_MB862XX is not set
1066# CONFIG_FB_BROADSHEET is not set
1033CONFIG_BACKLIGHT_LCD_SUPPORT=y 1067CONFIG_BACKLIGHT_LCD_SUPPORT=y
1034CONFIG_LCD_CLASS_DEVICE=m 1068CONFIG_LCD_CLASS_DEVICE=m
1069# CONFIG_LCD_LMS283GF05 is not set
1035CONFIG_LCD_LTV350QV=m 1070CONFIG_LCD_LTV350QV=m
1036# CONFIG_LCD_ILI9320 is not set 1071# CONFIG_LCD_ILI9320 is not set
1037# CONFIG_LCD_TDO24M is not set 1072# CONFIG_LCD_TDO24M is not set
1038# CONFIG_LCD_VGG2432A4 is not set 1073# CONFIG_LCD_VGG2432A4 is not set
1039# CONFIG_LCD_PLATFORM is not set 1074# CONFIG_LCD_PLATFORM is not set
1040CONFIG_BACKLIGHT_CLASS_DEVICE=m 1075CONFIG_BACKLIGHT_CLASS_DEVICE=m
1041# CONFIG_BACKLIGHT_CORGI is not set 1076CONFIG_BACKLIGHT_GENERIC=m
1077# CONFIG_BACKLIGHT_ADP8870 is not set
1042 1078
1043# 1079#
1044# Display device support 1080# Display device support
@@ -1066,6 +1102,7 @@ CONFIG_SOUND=m
1066CONFIG_SND=m 1102CONFIG_SND=m
1067CONFIG_SND_TIMER=m 1103CONFIG_SND_TIMER=m
1068CONFIG_SND_PCM=m 1104CONFIG_SND_PCM=m
1105CONFIG_SND_JACK=y
1069# CONFIG_SND_SEQUENCER is not set 1106# CONFIG_SND_SEQUENCER is not set
1070# CONFIG_SND_MIXER_OSS is not set 1107# CONFIG_SND_MIXER_OSS is not set
1071# CONFIG_SND_PCM_OSS is not set 1108# CONFIG_SND_PCM_OSS is not set
@@ -1074,6 +1111,11 @@ CONFIG_SND_SUPPORT_OLD_API=y
1074CONFIG_SND_VERBOSE_PROCFS=y 1111CONFIG_SND_VERBOSE_PROCFS=y
1075# CONFIG_SND_VERBOSE_PRINTK is not set 1112# CONFIG_SND_VERBOSE_PRINTK is not set
1076# CONFIG_SND_DEBUG is not set 1113# CONFIG_SND_DEBUG is not set
1114# CONFIG_SND_RAWMIDI_SEQ is not set
1115# CONFIG_SND_OPL3_LIB_SEQ is not set
1116# CONFIG_SND_OPL4_LIB_SEQ is not set
1117# CONFIG_SND_SBAWE_SEQ is not set
1118# CONFIG_SND_EMU10K1_SEQ is not set
1077CONFIG_SND_DRIVERS=y 1119CONFIG_SND_DRIVERS=y
1078# CONFIG_SND_DUMMY is not set 1120# CONFIG_SND_DUMMY is not set
1079# CONFIG_SND_MTPAV is not set 1121# CONFIG_SND_MTPAV is not set
@@ -1084,7 +1126,6 @@ CONFIG_SND_SPI=y
1084# 1126#
1085# ALSA Blackfin devices 1127# ALSA Blackfin devices
1086# 1128#
1087# CONFIG_SND_BLACKFIN_AD1836 is not set
1088# CONFIG_SND_BFIN_AD73322 is not set 1129# CONFIG_SND_BFIN_AD73322 is not set
1089CONFIG_SND_USB=y 1130CONFIG_SND_USB=y
1090# CONFIG_SND_USB_AUDIO is not set 1131# CONFIG_SND_USB_AUDIO is not set
@@ -1094,15 +1135,19 @@ CONFIG_SND_SOC_AC97_BUS=y
1094CONFIG_SND_BF5XX_I2S=m 1135CONFIG_SND_BF5XX_I2S=m
1095CONFIG_SND_BF5XX_SOC_SSM2602=m 1136CONFIG_SND_BF5XX_SOC_SSM2602=m
1096# CONFIG_SND_BF5XX_SOC_AD73311 is not set 1137# CONFIG_SND_BF5XX_SOC_AD73311 is not set
1138# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set
1139# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set
1140# CONFIG_SND_BF5XX_TDM is not set
1097CONFIG_SND_BF5XX_AC97=m 1141CONFIG_SND_BF5XX_AC97=m
1098CONFIG_SND_BF5XX_MMAP_SUPPORT=y 1142CONFIG_SND_BF5XX_MMAP_SUPPORT=y
1099# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set 1143# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
1144# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
1145CONFIG_SND_BF5XX_SOC_AD1980=m
1100CONFIG_SND_BF5XX_SOC_SPORT=m 1146CONFIG_SND_BF5XX_SOC_SPORT=m
1101CONFIG_SND_BF5XX_SOC_I2S=m 1147CONFIG_SND_BF5XX_SOC_I2S=m
1102CONFIG_SND_BF5XX_SOC_AC97=m 1148CONFIG_SND_BF5XX_SOC_AC97=m
1103CONFIG_SND_BF5XX_SOC_AD1980=m
1104CONFIG_SND_BF5XX_SPORT_NUM=0 1149CONFIG_SND_BF5XX_SPORT_NUM=0
1105# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set 1150CONFIG_SND_SOC_I2C_AND_SPI=m
1106# CONFIG_SND_SOC_ALL_CODECS is not set 1151# CONFIG_SND_SOC_ALL_CODECS is not set
1107CONFIG_SND_SOC_AD1980=m 1152CONFIG_SND_SOC_AD1980=m
1108CONFIG_SND_SOC_SSM2602=m 1153CONFIG_SND_SOC_SSM2602=m
@@ -1110,7 +1155,6 @@ CONFIG_SND_SOC_SSM2602=m
1110CONFIG_AC97_BUS=m 1155CONFIG_AC97_BUS=m
1111CONFIG_HID_SUPPORT=y 1156CONFIG_HID_SUPPORT=y
1112CONFIG_HID=y 1157CONFIG_HID=y
1113# CONFIG_HID_DEBUG is not set
1114# CONFIG_HIDRAW is not set 1158# CONFIG_HIDRAW is not set
1115 1159
1116# 1160#
@@ -1123,30 +1167,35 @@ CONFIG_USB_HID=y
1123# 1167#
1124# Special HID drivers 1168# Special HID drivers
1125# 1169#
1126CONFIG_HID_COMPAT=y
1127CONFIG_HID_A4TECH=y 1170CONFIG_HID_A4TECH=y
1128CONFIG_HID_APPLE=y 1171CONFIG_HID_APPLE=y
1129CONFIG_HID_BELKIN=y 1172CONFIG_HID_BELKIN=y
1130CONFIG_HID_BRIGHT=y
1131CONFIG_HID_CHERRY=y 1173CONFIG_HID_CHERRY=y
1132CONFIG_HID_CHICONY=y 1174CONFIG_HID_CHICONY=y
1133CONFIG_HID_CYPRESS=y 1175CONFIG_HID_CYPRESS=y
1134CONFIG_HID_DELL=y 1176# CONFIG_HID_DRAGONRISE is not set
1135CONFIG_HID_EZKEY=y 1177CONFIG_HID_EZKEY=y
1178# CONFIG_HID_KYE is not set
1136CONFIG_HID_GYRATION=y 1179CONFIG_HID_GYRATION=y
1180# CONFIG_HID_TWINHAN is not set
1181# CONFIG_HID_KENSINGTON is not set
1137CONFIG_HID_LOGITECH=y 1182CONFIG_HID_LOGITECH=y
1138# CONFIG_LOGITECH_FF is not set 1183# CONFIG_LOGITECH_FF is not set
1139# CONFIG_LOGIRUMBLEPAD2_FF is not set 1184# CONFIG_LOGIRUMBLEPAD2_FF is not set
1140CONFIG_HID_MICROSOFT=y 1185CONFIG_HID_MICROSOFT=y
1141CONFIG_HID_MONTEREY=y 1186CONFIG_HID_MONTEREY=y
1187# CONFIG_HID_NTRIG is not set
1142CONFIG_HID_PANTHERLORD=y 1188CONFIG_HID_PANTHERLORD=y
1143# CONFIG_PANTHERLORD_FF is not set 1189# CONFIG_PANTHERLORD_FF is not set
1144CONFIG_HID_PETALYNX=y 1190CONFIG_HID_PETALYNX=y
1145CONFIG_HID_SAMSUNG=y 1191CONFIG_HID_SAMSUNG=y
1146CONFIG_HID_SONY=y 1192CONFIG_HID_SONY=y
1147CONFIG_HID_SUNPLUS=y 1193CONFIG_HID_SUNPLUS=y
1148CONFIG_THRUSTMASTER_FF=m 1194# CONFIG_HID_GREENASIA is not set
1149CONFIG_ZEROPLUS_FF=m 1195# CONFIG_HID_SMARTJOYPLUS is not set
1196# CONFIG_HID_TOPSEED is not set
1197# CONFIG_HID_THRUSTMASTER is not set
1198# CONFIG_HID_ZEROPLUS is not set
1150CONFIG_USB_SUPPORT=y 1199CONFIG_USB_SUPPORT=y
1151CONFIG_USB_ARCH_HAS_HCD=y 1200CONFIG_USB_ARCH_HAS_HCD=y
1152# CONFIG_USB_ARCH_HAS_OHCI is not set 1201# CONFIG_USB_ARCH_HAS_OHCI is not set
@@ -1172,6 +1221,7 @@ CONFIG_USB_MON=y
1172# USB Host Controller Drivers 1221# USB Host Controller Drivers
1173# 1222#
1174# CONFIG_USB_C67X00_HCD is not set 1223# CONFIG_USB_C67X00_HCD is not set
1224# CONFIG_USB_OXU210HP_HCD is not set
1175# CONFIG_USB_ISP116X_HCD is not set 1225# CONFIG_USB_ISP116X_HCD is not set
1176# CONFIG_USB_ISP1760_HCD is not set 1226# CONFIG_USB_ISP1760_HCD is not set
1177# CONFIG_USB_ISP1362_HCD is not set 1227# CONFIG_USB_ISP1362_HCD is not set
@@ -1188,9 +1238,7 @@ CONFIG_USB_MUSB_HOST=y
1188# CONFIG_USB_MUSB_PERIPHERAL is not set 1238# CONFIG_USB_MUSB_PERIPHERAL is not set
1189# CONFIG_USB_MUSB_OTG is not set 1239# CONFIG_USB_MUSB_OTG is not set
1190CONFIG_USB_MUSB_HDRC_HCD=y 1240CONFIG_USB_MUSB_HDRC_HCD=y
1191# CONFIG_MUSB_PIO_ONLY is not set 1241CONFIG_MUSB_PIO_ONLY=y
1192CONFIG_USB_INVENTRA_DMA=y
1193# CONFIG_USB_TI_CPPI_DMA is not set
1194# CONFIG_USB_MUSB_DEBUG is not set 1242# CONFIG_USB_MUSB_DEBUG is not set
1195 1243
1196# 1244#
@@ -1202,18 +1250,17 @@ CONFIG_USB_INVENTRA_DMA=y
1202# CONFIG_USB_TMC is not set 1250# CONFIG_USB_TMC is not set
1203 1251
1204# 1252#
1205# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; 1253# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
1206# 1254#
1207 1255
1208# 1256#
1209# see USB_STORAGE Help for more information 1257# also be needed; see USB_STORAGE Help for more info
1210# 1258#
1211CONFIG_USB_STORAGE=y 1259CONFIG_USB_STORAGE=y
1212# CONFIG_USB_STORAGE_DEBUG is not set 1260# CONFIG_USB_STORAGE_DEBUG is not set
1213# CONFIG_USB_STORAGE_DATAFAB is not set 1261# CONFIG_USB_STORAGE_DATAFAB is not set
1214# CONFIG_USB_STORAGE_FREECOM is not set 1262# CONFIG_USB_STORAGE_FREECOM is not set
1215# CONFIG_USB_STORAGE_ISD200 is not set 1263# CONFIG_USB_STORAGE_ISD200 is not set
1216# CONFIG_USB_STORAGE_DPCM is not set
1217# CONFIG_USB_STORAGE_USBAT is not set 1264# CONFIG_USB_STORAGE_USBAT is not set
1218# CONFIG_USB_STORAGE_SDDR09 is not set 1265# CONFIG_USB_STORAGE_SDDR09 is not set
1219# CONFIG_USB_STORAGE_SDDR55 is not set 1266# CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1249,7 +1296,6 @@ CONFIG_USB_STORAGE=y
1249# CONFIG_USB_LED is not set 1296# CONFIG_USB_LED is not set
1250# CONFIG_USB_CYPRESS_CY7C63 is not set 1297# CONFIG_USB_CYPRESS_CY7C63 is not set
1251# CONFIG_USB_CYTHERM is not set 1298# CONFIG_USB_CYTHERM is not set
1252# CONFIG_USB_PHIDGET is not set
1253# CONFIG_USB_IDMOUSE is not set 1299# CONFIG_USB_IDMOUSE is not set
1254# CONFIG_USB_FTDI_ELAN is not set 1300# CONFIG_USB_FTDI_ELAN is not set
1255# CONFIG_USB_APPLEDISPLAY is not set 1301# CONFIG_USB_APPLEDISPLAY is not set
@@ -1261,6 +1307,13 @@ CONFIG_USB_STORAGE=y
1261# CONFIG_USB_ISIGHTFW is not set 1307# CONFIG_USB_ISIGHTFW is not set
1262# CONFIG_USB_VST is not set 1308# CONFIG_USB_VST is not set
1263# CONFIG_USB_GADGET is not set 1309# CONFIG_USB_GADGET is not set
1310
1311#
1312# OTG and related infrastructure
1313#
1314CONFIG_USB_OTG_UTILS=y
1315# CONFIG_USB_GPIO_VBUS is not set
1316CONFIG_NOP_USB_XCEIV=y
1264# CONFIG_MMC is not set 1317# CONFIG_MMC is not set
1265# CONFIG_MEMSTICK is not set 1318# CONFIG_MEMSTICK is not set
1266# CONFIG_NEW_LEDS is not set 1319# CONFIG_NEW_LEDS is not set
@@ -1296,6 +1349,7 @@ CONFIG_RTC_INTF_DEV=y
1296# CONFIG_RTC_DRV_S35390A is not set 1349# CONFIG_RTC_DRV_S35390A is not set
1297# CONFIG_RTC_DRV_FM3130 is not set 1350# CONFIG_RTC_DRV_FM3130 is not set
1298# CONFIG_RTC_DRV_RX8581 is not set 1351# CONFIG_RTC_DRV_RX8581 is not set
1352# CONFIG_RTC_DRV_RX8025 is not set
1299 1353
1300# 1354#
1301# SPI RTC drivers 1355# SPI RTC drivers
@@ -1307,6 +1361,7 @@ CONFIG_RTC_INTF_DEV=y
1307# CONFIG_RTC_DRV_R9701 is not set 1361# CONFIG_RTC_DRV_R9701 is not set
1308# CONFIG_RTC_DRV_RS5C348 is not set 1362# CONFIG_RTC_DRV_RS5C348 is not set
1309# CONFIG_RTC_DRV_DS3234 is not set 1363# CONFIG_RTC_DRV_DS3234 is not set
1364# CONFIG_RTC_DRV_PCF2123 is not set
1310 1365
1311# 1366#
1312# Platform RTC drivers 1367# Platform RTC drivers
@@ -1327,10 +1382,21 @@ CONFIG_RTC_INTF_DEV=y
1327# 1382#
1328CONFIG_RTC_DRV_BFIN=y 1383CONFIG_RTC_DRV_BFIN=y
1329# CONFIG_DMADEVICES is not set 1384# CONFIG_DMADEVICES is not set
1385# CONFIG_AUXDISPLAY is not set
1330# CONFIG_UIO is not set 1386# CONFIG_UIO is not set
1387
1388#
1389# TI VLYNQ
1390#
1331# CONFIG_STAGING is not set 1391# CONFIG_STAGING is not set
1332 1392
1333# 1393#
1394# Firmware Drivers
1395#
1396# CONFIG_FIRMWARE_MEMMAP is not set
1397# CONFIG_SIGMA is not set
1398
1399#
1334# File systems 1400# File systems
1335# 1401#
1336CONFIG_EXT2_FS=m 1402CONFIG_EXT2_FS=m
@@ -1340,9 +1406,13 @@ CONFIG_EXT2_FS=m
1340# CONFIG_REISERFS_FS is not set 1406# CONFIG_REISERFS_FS is not set
1341# CONFIG_JFS_FS is not set 1407# CONFIG_JFS_FS is not set
1342# CONFIG_FS_POSIX_ACL is not set 1408# CONFIG_FS_POSIX_ACL is not set
1343CONFIG_FILE_LOCKING=y
1344# CONFIG_XFS_FS is not set 1409# CONFIG_XFS_FS is not set
1410# CONFIG_GFS2_FS is not set
1345# CONFIG_OCFS2_FS is not set 1411# CONFIG_OCFS2_FS is not set
1412# CONFIG_BTRFS_FS is not set
1413# CONFIG_NILFS2_FS is not set
1414CONFIG_FILE_LOCKING=y
1415CONFIG_FSNOTIFY=y
1346# CONFIG_DNOTIFY is not set 1416# CONFIG_DNOTIFY is not set
1347CONFIG_INOTIFY=y 1417CONFIG_INOTIFY=y
1348CONFIG_INOTIFY_USER=y 1418CONFIG_INOTIFY_USER=y
@@ -1352,6 +1422,11 @@ CONFIG_INOTIFY_USER=y
1352# CONFIG_FUSE_FS is not set 1422# CONFIG_FUSE_FS is not set
1353 1423
1354# 1424#
1425# Caches
1426#
1427# CONFIG_FSCACHE is not set
1428
1429#
1355# CD-ROM/DVD Filesystems 1430# CD-ROM/DVD Filesystems
1356# 1431#
1357CONFIG_ISO9660_FS=m 1432CONFIG_ISO9660_FS=m
@@ -1376,13 +1451,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1376CONFIG_PROC_FS=y 1451CONFIG_PROC_FS=y
1377CONFIG_PROC_SYSCTL=y 1452CONFIG_PROC_SYSCTL=y
1378CONFIG_SYSFS=y 1453CONFIG_SYSFS=y
1379# CONFIG_TMPFS is not set
1380# CONFIG_HUGETLB_PAGE is not set 1454# CONFIG_HUGETLB_PAGE is not set
1381# CONFIG_CONFIGFS_FS is not set 1455# CONFIG_CONFIGFS_FS is not set
1382 1456CONFIG_MISC_FILESYSTEMS=y
1383#
1384# Miscellaneous filesystems
1385#
1386# CONFIG_ADFS_FS is not set 1457# CONFIG_ADFS_FS is not set
1387# CONFIG_AFFS_FS is not set 1458# CONFIG_AFFS_FS is not set
1388# CONFIG_HFS_FS is not set 1459# CONFIG_HFS_FS is not set
@@ -1401,17 +1472,8 @@ CONFIG_JFFS2_ZLIB=y
1401# CONFIG_JFFS2_LZO is not set 1472# CONFIG_JFFS2_LZO is not set
1402CONFIG_JFFS2_RTIME=y 1473CONFIG_JFFS2_RTIME=y
1403# CONFIG_JFFS2_RUBIN is not set 1474# CONFIG_JFFS2_RUBIN is not set
1404CONFIG_YAFFS_FS=m
1405CONFIG_YAFFS_YAFFS1=y
1406# CONFIG_YAFFS_9BYTE_TAGS is not set
1407# CONFIG_YAFFS_DOES_ECC is not set
1408CONFIG_YAFFS_YAFFS2=y
1409CONFIG_YAFFS_AUTO_YAFFS2=y
1410# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1411# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1412# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1413CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1414# CONFIG_CRAMFS is not set 1475# CONFIG_CRAMFS is not set
1476# CONFIG_SQUASHFS is not set
1415# CONFIG_VXFS_FS is not set 1477# CONFIG_VXFS_FS is not set
1416# CONFIG_MINIX_FS is not set 1478# CONFIG_MINIX_FS is not set
1417# CONFIG_OMFS_FS is not set 1479# CONFIG_OMFS_FS is not set
@@ -1430,7 +1492,6 @@ CONFIG_LOCKD=m
1430CONFIG_LOCKD_V4=y 1492CONFIG_LOCKD_V4=y
1431CONFIG_NFS_COMMON=y 1493CONFIG_NFS_COMMON=y
1432CONFIG_SUNRPC=m 1494CONFIG_SUNRPC=m
1433# CONFIG_SUNRPC_REGISTER_V4 is not set
1434# CONFIG_RPCSEC_GSS_KRB5 is not set 1495# CONFIG_RPCSEC_GSS_KRB5 is not set
1435# CONFIG_RPCSEC_GSS_SPKM3 is not set 1496# CONFIG_RPCSEC_GSS_SPKM3 is not set
1436CONFIG_SMB_FS=m 1497CONFIG_SMB_FS=m
@@ -1445,7 +1506,7 @@ CONFIG_SMB_FS=m
1445# 1506#
1446# CONFIG_PARTITION_ADVANCED is not set 1507# CONFIG_PARTITION_ADVANCED is not set
1447CONFIG_MSDOS_PARTITION=y 1508CONFIG_MSDOS_PARTITION=y
1448CONFIG_NLS=m 1509CONFIG_NLS=y
1449CONFIG_NLS_DEFAULT="iso8859-1" 1510CONFIG_NLS_DEFAULT="iso8859-1"
1450CONFIG_NLS_CODEPAGE_437=m 1511CONFIG_NLS_CODEPAGE_437=m
1451# CONFIG_NLS_CODEPAGE_737 is not set 1512# CONFIG_NLS_CODEPAGE_737 is not set
@@ -1495,14 +1556,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1495CONFIG_ENABLE_MUST_CHECK=y 1556CONFIG_ENABLE_MUST_CHECK=y
1496CONFIG_FRAME_WARN=1024 1557CONFIG_FRAME_WARN=1024
1497# CONFIG_MAGIC_SYSRQ is not set 1558# CONFIG_MAGIC_SYSRQ is not set
1559# CONFIG_STRIP_ASM_SYMS is not set
1498# CONFIG_UNUSED_SYMBOLS is not set 1560# CONFIG_UNUSED_SYMBOLS is not set
1499CONFIG_DEBUG_FS=y 1561CONFIG_DEBUG_FS=y
1500# CONFIG_HEADERS_CHECK is not set 1562# CONFIG_HEADERS_CHECK is not set
1563CONFIG_DEBUG_SECTION_MISMATCH=y
1501CONFIG_DEBUG_KERNEL=y 1564CONFIG_DEBUG_KERNEL=y
1502CONFIG_DEBUG_SHIRQ=y 1565CONFIG_DEBUG_SHIRQ=y
1503CONFIG_DETECT_SOFTLOCKUP=y 1566CONFIG_DETECT_SOFTLOCKUP=y
1504# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1567# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1505CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1568CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1569CONFIG_DETECT_HUNG_TASK=y
1570# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1571CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1506CONFIG_SCHED_DEBUG=y 1572CONFIG_SCHED_DEBUG=y
1507# CONFIG_SCHEDSTATS is not set 1573# CONFIG_SCHEDSTATS is not set
1508# CONFIG_TIMER_STATS is not set 1574# CONFIG_TIMER_STATS is not set
@@ -1510,31 +1576,39 @@ CONFIG_SCHED_DEBUG=y
1510# CONFIG_DEBUG_SLAB is not set 1576# CONFIG_DEBUG_SLAB is not set
1511# CONFIG_DEBUG_SPINLOCK is not set 1577# CONFIG_DEBUG_SPINLOCK is not set
1512# CONFIG_DEBUG_MUTEXES is not set 1578# CONFIG_DEBUG_MUTEXES is not set
1579# CONFIG_DEBUG_LOCK_ALLOC is not set
1580# CONFIG_PROVE_LOCKING is not set
1581# CONFIG_LOCK_STAT is not set
1513# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1582# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1514# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1583# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1515# CONFIG_DEBUG_KOBJECT is not set 1584# CONFIG_DEBUG_KOBJECT is not set
1516CONFIG_DEBUG_BUGVERBOSE=y 1585CONFIG_DEBUG_BUGVERBOSE=y
1517CONFIG_DEBUG_INFO=y 1586CONFIG_DEBUG_INFO=y
1518# CONFIG_DEBUG_VM is not set 1587# CONFIG_DEBUG_VM is not set
1588# CONFIG_DEBUG_NOMMU_REGIONS is not set
1519# CONFIG_DEBUG_WRITECOUNT is not set 1589# CONFIG_DEBUG_WRITECOUNT is not set
1520# CONFIG_DEBUG_MEMORY_INIT is not set 1590# CONFIG_DEBUG_MEMORY_INIT is not set
1521# CONFIG_DEBUG_LIST is not set 1591# CONFIG_DEBUG_LIST is not set
1522# CONFIG_DEBUG_SG is not set 1592# CONFIG_DEBUG_SG is not set
1593# CONFIG_DEBUG_NOTIFIERS is not set
1594# CONFIG_DEBUG_CREDENTIALS is not set
1523# CONFIG_FRAME_POINTER is not set 1595# CONFIG_FRAME_POINTER is not set
1524# CONFIG_BOOT_PRINTK_DELAY is not set 1596# CONFIG_BOOT_PRINTK_DELAY is not set
1525# CONFIG_RCU_TORTURE_TEST is not set 1597# CONFIG_RCU_TORTURE_TEST is not set
1526# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1598# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1527# CONFIG_BACKTRACE_SELF_TEST is not set 1599# CONFIG_BACKTRACE_SELF_TEST is not set
1528# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1600# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1601# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1529# CONFIG_FAULT_INJECTION is not set 1602# CONFIG_FAULT_INJECTION is not set
1530 1603# CONFIG_PAGE_POISONING is not set
1531# 1604CONFIG_HAVE_FUNCTION_TRACER=y
1532# Tracers 1605CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1533# 1606CONFIG_TRACING_SUPPORT=y
1534# CONFIG_SCHED_TRACER is not set 1607# CONFIG_FTRACE is not set
1535# CONFIG_CONTEXT_SWITCH_TRACER is not set 1608# CONFIG_BRANCH_PROFILE_NONE is not set
1536# CONFIG_BOOT_TRACER is not set 1609# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1537# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1610# CONFIG_PROFILE_ALL_BRANCHES is not set
1611# CONFIG_DYNAMIC_DEBUG is not set
1538# CONFIG_SAMPLES is not set 1612# CONFIG_SAMPLES is not set
1539CONFIG_HAVE_ARCH_KGDB=y 1613CONFIG_HAVE_ARCH_KGDB=y
1540# CONFIG_KGDB is not set 1614# CONFIG_KGDB is not set
@@ -1559,6 +1633,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1559CONFIG_EARLY_PRINTK=y 1633CONFIG_EARLY_PRINTK=y
1560CONFIG_CPLB_INFO=y 1634CONFIG_CPLB_INFO=y
1561CONFIG_ACCESS_CHECK=y 1635CONFIG_ACCESS_CHECK=y
1636# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1562 1637
1563# 1638#
1564# Security options 1639# Security options
@@ -1567,15 +1642,15 @@ CONFIG_ACCESS_CHECK=y
1567CONFIG_SECURITY=y 1642CONFIG_SECURITY=y
1568# CONFIG_SECURITYFS is not set 1643# CONFIG_SECURITYFS is not set
1569# CONFIG_SECURITY_NETWORK is not set 1644# CONFIG_SECURITY_NETWORK is not set
1645# CONFIG_SECURITY_PATH is not set
1570# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1646# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1571# CONFIG_SECURITY_ROOTPLUG is not set 1647# CONFIG_SECURITY_ROOTPLUG is not set
1572CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1648# CONFIG_SECURITY_TOMOYO is not set
1573CONFIG_CRYPTO=y 1649CONFIG_CRYPTO=y
1574 1650
1575# 1651#
1576# Crypto core or helper 1652# Crypto core or helper
1577# 1653#
1578# CONFIG_CRYPTO_FIPS is not set
1579# CONFIG_CRYPTO_MANAGER is not set 1654# CONFIG_CRYPTO_MANAGER is not set
1580# CONFIG_CRYPTO_MANAGER2 is not set 1655# CONFIG_CRYPTO_MANAGER2 is not set
1581# CONFIG_CRYPTO_GF128MUL is not set 1656# CONFIG_CRYPTO_GF128MUL is not set
@@ -1607,11 +1682,13 @@ CONFIG_CRYPTO=y
1607# 1682#
1608# CONFIG_CRYPTO_HMAC is not set 1683# CONFIG_CRYPTO_HMAC is not set
1609# CONFIG_CRYPTO_XCBC is not set 1684# CONFIG_CRYPTO_XCBC is not set
1685# CONFIG_CRYPTO_VMAC is not set
1610 1686
1611# 1687#
1612# Digest 1688# Digest
1613# 1689#
1614# CONFIG_CRYPTO_CRC32C is not set 1690# CONFIG_CRYPTO_CRC32C is not set
1691# CONFIG_CRYPTO_GHASH is not set
1615# CONFIG_CRYPTO_MD4 is not set 1692# CONFIG_CRYPTO_MD4 is not set
1616# CONFIG_CRYPTO_MD5 is not set 1693# CONFIG_CRYPTO_MD5 is not set
1617# CONFIG_CRYPTO_MICHAEL_MIC is not set 1694# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1648,6 +1725,7 @@ CONFIG_CRYPTO=y
1648# Compression 1725# Compression
1649# 1726#
1650# CONFIG_CRYPTO_DEFLATE is not set 1727# CONFIG_CRYPTO_DEFLATE is not set
1728# CONFIG_CRYPTO_ZLIB is not set
1651# CONFIG_CRYPTO_LZO is not set 1729# CONFIG_CRYPTO_LZO is not set
1652 1730
1653# 1731#
@@ -1655,11 +1733,13 @@ CONFIG_CRYPTO=y
1655# 1733#
1656# CONFIG_CRYPTO_ANSI_CPRNG is not set 1734# CONFIG_CRYPTO_ANSI_CPRNG is not set
1657CONFIG_CRYPTO_HW=y 1735CONFIG_CRYPTO_HW=y
1736# CONFIG_BINARY_PRINTF is not set
1658 1737
1659# 1738#
1660# Library routines 1739# Library routines
1661# 1740#
1662CONFIG_BITREVERSE=y 1741CONFIG_BITREVERSE=y
1742CONFIG_GENERIC_FIND_LAST_BIT=y
1663CONFIG_CRC_CCITT=m 1743CONFIG_CRC_CCITT=m
1664# CONFIG_CRC16 is not set 1744# CONFIG_CRC16 is not set
1665# CONFIG_CRC_T10DIF is not set 1745# CONFIG_CRC_T10DIF is not set
@@ -1669,6 +1749,8 @@ CONFIG_CRC32=y
1669# CONFIG_LIBCRC32C is not set 1749# CONFIG_LIBCRC32C is not set
1670CONFIG_ZLIB_INFLATE=y 1750CONFIG_ZLIB_INFLATE=y
1671CONFIG_ZLIB_DEFLATE=m 1751CONFIG_ZLIB_DEFLATE=m
1752CONFIG_DECOMPRESS_GZIP=y
1672CONFIG_HAS_IOMEM=y 1753CONFIG_HAS_IOMEM=y
1673CONFIG_HAS_IOPORT=y 1754CONFIG_HAS_IOPORT=y
1674CONFIG_HAS_DMA=y 1755CONFIG_HAS_DMA=y
1756CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 50f9a23ccdbd..0b13d5836a48 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102# CONFIG_DEFAULT_NOOP is not set 131# CONFIG_DEFAULT_NOOP is not set
103CONFIG_DEFAULT_IOSCHED="anticipatory" 132CONFIG_DEFAULT_IOSCHED="anticipatory"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_BF533=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=3 174CONFIG_BF_REV_MIN=3
@@ -228,7 +256,7 @@ CONFIG_GENERIC_TIME=y
228CONFIG_GENERIC_CLOCKEVENTS=y 256CONFIG_GENERIC_CLOCKEVENTS=y
229# CONFIG_TICKSOURCE_GPTMR0 is not set 257# CONFIG_TICKSOURCE_GPTMR0 is not set
230CONFIG_TICKSOURCE_CORETMR=y 258CONFIG_TICKSOURCE_CORETMR=y
231# CONFIG_CYCLES_CLOCKSOURCE is not set 259CONFIG_CYCLES_CLOCKSOURCE=y
232# CONFIG_GPTMR0_CLOCKSOURCE is not set 260# CONFIG_GPTMR0_CLOCKSOURCE is not set
233CONFIG_TICK_ONESHOT=y 261CONFIG_TICK_ONESHOT=y
234# CONFIG_NO_HZ is not set 262# CONFIG_NO_HZ is not set
@@ -280,7 +308,6 @@ CONFIG_FLATMEM=y
280CONFIG_FLAT_NODE_MEM_MAP=y 308CONFIG_FLAT_NODE_MEM_MAP=y
281CONFIG_PAGEFLAGS_EXTENDED=y 309CONFIG_PAGEFLAGS_EXTENDED=y
282CONFIG_SPLIT_PTLOCK_CPUS=4 310CONFIG_SPLIT_PTLOCK_CPUS=4
283# CONFIG_RESOURCES_64BIT is not set
284# CONFIG_PHYS_ADDR_T_64BIT is not set 311# CONFIG_PHYS_ADDR_T_64BIT is not set
285CONFIG_ZONE_DMA_FLAG=1 312CONFIG_ZONE_DMA_FLAG=1
286CONFIG_VIRT_TO_BUS=y 313CONFIG_VIRT_TO_BUS=y
@@ -289,19 +316,18 @@ CONFIG_BFIN_GPTIMERS=m
289# CONFIG_DMA_UNCACHED_4M is not set 316# CONFIG_DMA_UNCACHED_4M is not set
290# CONFIG_DMA_UNCACHED_2M is not set 317# CONFIG_DMA_UNCACHED_2M is not set
291CONFIG_DMA_UNCACHED_1M=y 318CONFIG_DMA_UNCACHED_1M=y
319# CONFIG_DMA_UNCACHED_512K is not set
320# CONFIG_DMA_UNCACHED_256K is not set
321# CONFIG_DMA_UNCACHED_128K is not set
292# CONFIG_DMA_UNCACHED_NONE is not set 322# CONFIG_DMA_UNCACHED_NONE is not set
293 323
294# 324#
295# Cache Support 325# Cache Support
296# 326#
297#
298# Cache Support
299#
300CONFIG_BFIN_ICACHE=y 327CONFIG_BFIN_ICACHE=y
301# CONFIG_BFIN_ICACHE_LOCK is not set 328CONFIG_BFIN_EXTMEM_ICACHEABLE=y
302CONFIG_BFIN_DCACHE=y 329CONFIG_BFIN_DCACHE=y
303# CONFIG_BFIN_DCACHE_BANKA is not set 330# CONFIG_BFIN_DCACHE_BANKA is not set
304CONFIG_BFIN_EXTMEM_ICACHEABLE=y
305CONFIG_BFIN_EXTMEM_DCACHEABLE=y 331CONFIG_BFIN_EXTMEM_DCACHEABLE=y
306CONFIG_BFIN_EXTMEM_WRITEBACK=y 332CONFIG_BFIN_EXTMEM_WRITEBACK=y
307# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 333# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -312,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
312# CONFIG_MPU is not set 338# CONFIG_MPU is not set
313 339
314# 340#
315# Asynchonous Memory Configuration 341# Asynchronous Memory Configuration
316# 342#
317 343
318# 344#
@@ -358,6 +384,7 @@ CONFIG_PM=y
358CONFIG_PM_SLEEP=y 384CONFIG_PM_SLEEP=y
359CONFIG_SUSPEND=y 385CONFIG_SUSPEND=y
360CONFIG_SUSPEND_FREEZER=y 386CONFIG_SUSPEND_FREEZER=y
387# CONFIG_PM_RUNTIME is not set
361CONFIG_ARCH_SUSPEND_POSSIBLE=y 388CONFIG_ARCH_SUSPEND_POSSIBLE=y
362CONFIG_PM_BFIN_SLEEP_DEEPER=y 389CONFIG_PM_BFIN_SLEEP_DEEPER=y
363# CONFIG_PM_BFIN_SLEEP is not set 390# CONFIG_PM_BFIN_SLEEP is not set
@@ -379,11 +406,6 @@ CONFIG_NET=y
379CONFIG_PACKET=y 406CONFIG_PACKET=y
380# CONFIG_PACKET_MMAP is not set 407# CONFIG_PACKET_MMAP is not set
381CONFIG_UNIX=y 408CONFIG_UNIX=y
382CONFIG_XFRM=y
383# CONFIG_XFRM_USER is not set
384# CONFIG_XFRM_SUB_POLICY is not set
385# CONFIG_XFRM_MIGRATE is not set
386# CONFIG_XFRM_STATISTICS is not set
387# CONFIG_NET_KEY is not set 409# CONFIG_NET_KEY is not set
388CONFIG_INET=y 410CONFIG_INET=y
389# CONFIG_IP_MULTICAST is not set 411# CONFIG_IP_MULTICAST is not set
@@ -407,7 +429,6 @@ CONFIG_IP_PNP=y
407# CONFIG_INET_XFRM_MODE_BEET is not set 429# CONFIG_INET_XFRM_MODE_BEET is not set
408# CONFIG_INET_LRO is not set 430# CONFIG_INET_LRO is not set
409# CONFIG_INET_DIAG is not set 431# CONFIG_INET_DIAG is not set
410CONFIG_INET_TCP_DIAG=y
411# CONFIG_TCP_CONG_ADVANCED is not set 432# CONFIG_TCP_CONG_ADVANCED is not set
412CONFIG_TCP_CONG_CUBIC=y 433CONFIG_TCP_CONG_CUBIC=y
413CONFIG_DEFAULT_TCP_CONG="cubic" 434CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -418,6 +439,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
418# CONFIG_NETFILTER is not set 439# CONFIG_NETFILTER is not set
419# CONFIG_IP_DCCP is not set 440# CONFIG_IP_DCCP is not set
420# CONFIG_IP_SCTP is not set 441# CONFIG_IP_SCTP is not set
442# CONFIG_RDS is not set
421# CONFIG_TIPC is not set 443# CONFIG_TIPC is not set
422# CONFIG_ATM is not set 444# CONFIG_ATM is not set
423# CONFIG_BRIDGE is not set 445# CONFIG_BRIDGE is not set
@@ -431,7 +453,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
431# CONFIG_LAPB is not set 453# CONFIG_LAPB is not set
432# CONFIG_ECONET is not set 454# CONFIG_ECONET is not set
433# CONFIG_WAN_ROUTER is not set 455# CONFIG_WAN_ROUTER is not set
456# CONFIG_PHONET is not set
457# CONFIG_IEEE802154 is not set
434# CONFIG_NET_SCHED is not set 458# CONFIG_NET_SCHED is not set
459# CONFIG_DCB is not set
435 460
436# 461#
437# Network testing 462# Network testing
@@ -475,13 +500,8 @@ CONFIG_IRTTY_SIR=m
475# 500#
476# CONFIG_BT is not set 501# CONFIG_BT is not set
477# CONFIG_AF_RXRPC is not set 502# CONFIG_AF_RXRPC is not set
478# CONFIG_PHONET is not set 503# CONFIG_WIRELESS is not set
479CONFIG_WIRELESS=y 504# CONFIG_WIMAX is not set
480# CONFIG_CFG80211 is not set
481CONFIG_WIRELESS_OLD_REGULATORY=y
482# CONFIG_WIRELESS_EXT is not set
483# CONFIG_MAC80211 is not set
484# CONFIG_IEEE80211 is not set
485# CONFIG_RFKILL is not set 505# CONFIG_RFKILL is not set
486# CONFIG_NET_9P is not set 506# CONFIG_NET_9P is not set
487 507
@@ -502,6 +522,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
502# CONFIG_CONNECTOR is not set 522# CONFIG_CONNECTOR is not set
503CONFIG_MTD=y 523CONFIG_MTD=y
504# CONFIG_MTD_DEBUG is not set 524# CONFIG_MTD_DEBUG is not set
525# CONFIG_MTD_TESTS is not set
505# CONFIG_MTD_CONCAT is not set 526# CONFIG_MTD_CONCAT is not set
506CONFIG_MTD_PARTITIONS=y 527CONFIG_MTD_PARTITIONS=y
507# CONFIG_MTD_REDBOOT_PARTS is not set 528# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -559,6 +580,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
559# 580#
560# CONFIG_MTD_DATAFLASH is not set 581# CONFIG_MTD_DATAFLASH is not set
561# CONFIG_MTD_M25P80 is not set 582# CONFIG_MTD_M25P80 is not set
583# CONFIG_MTD_SST25L is not set
562# CONFIG_MTD_SLRAM is not set 584# CONFIG_MTD_SLRAM is not set
563# CONFIG_MTD_PHRAM is not set 585# CONFIG_MTD_PHRAM is not set
564# CONFIG_MTD_MTDRAM is not set 586# CONFIG_MTD_MTDRAM is not set
@@ -574,6 +596,11 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
574# CONFIG_MTD_ONENAND is not set 596# CONFIG_MTD_ONENAND is not set
575 597
576# 598#
599# LPDDR flash memory drivers
600#
601# CONFIG_MTD_LPDDR is not set
602
603#
577# UBI - Unsorted block images 604# UBI - Unsorted block images
578# 605#
579# CONFIG_MTD_UBI is not set 606# CONFIG_MTD_UBI is not set
@@ -590,9 +617,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
590# CONFIG_ATA_OVER_ETH is not set 617# CONFIG_ATA_OVER_ETH is not set
591# CONFIG_BLK_DEV_HD is not set 618# CONFIG_BLK_DEV_HD is not set
592CONFIG_MISC_DEVICES=y 619CONFIG_MISC_DEVICES=y
593# CONFIG_EEPROM_93CX6 is not set
594# CONFIG_ENCLOSURE_SERVICES is not set 620# CONFIG_ENCLOSURE_SERVICES is not set
595# CONFIG_C2PORT is not set 621# CONFIG_C2PORT is not set
622
623#
624# EEPROM support
625#
626# CONFIG_EEPROM_AT25 is not set
627# CONFIG_EEPROM_93CX6 is not set
596CONFIG_HAVE_IDE=y 628CONFIG_HAVE_IDE=y
597# CONFIG_IDE is not set 629# CONFIG_IDE is not set
598 630
@@ -616,9 +648,12 @@ CONFIG_NETDEVICES=y
616CONFIG_NET_ETHERNET=y 648CONFIG_NET_ETHERNET=y
617CONFIG_MII=y 649CONFIG_MII=y
618CONFIG_SMC91X=y 650CONFIG_SMC91X=y
619# CONFIG_SMSC911X is not set
620# CONFIG_DM9000 is not set 651# CONFIG_DM9000 is not set
621# CONFIG_ENC28J60 is not set 652# CONFIG_ENC28J60 is not set
653# CONFIG_ETHOC is not set
654# CONFIG_SMSC911X is not set
655# CONFIG_DNET is not set
656# CONFIG_ADF702X is not set
622# CONFIG_IBM_NEW_EMAC_ZMII is not set 657# CONFIG_IBM_NEW_EMAC_ZMII is not set
623# CONFIG_IBM_NEW_EMAC_RGMII is not set 658# CONFIG_IBM_NEW_EMAC_RGMII is not set
624# CONFIG_IBM_NEW_EMAC_TAH is not set 659# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -627,15 +662,16 @@ CONFIG_SMC91X=y
627# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 662# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
628# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 663# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
629# CONFIG_B44 is not set 664# CONFIG_B44 is not set
665# CONFIG_KS8842 is not set
666# CONFIG_KS8851 is not set
667# CONFIG_KS8851_MLL is not set
630# CONFIG_NETDEV_1000 is not set 668# CONFIG_NETDEV_1000 is not set
631# CONFIG_NETDEV_10000 is not set 669# CONFIG_NETDEV_10000 is not set
670# CONFIG_WLAN is not set
632 671
633# 672#
634# Wireless LAN 673# Enable WiMAX (Networking options) to see the WiMAX drivers
635# 674#
636# CONFIG_WLAN_PRE80211 is not set
637# CONFIG_WLAN_80211 is not set
638# CONFIG_IWLWIFI_LEDS is not set
639# CONFIG_WAN is not set 675# CONFIG_WAN is not set
640# CONFIG_PPP is not set 676# CONFIG_PPP is not set
641# CONFIG_SLIP is not set 677# CONFIG_SLIP is not set
@@ -679,15 +715,12 @@ CONFIG_INPUT_EVDEV=m
679# 715#
680# Character devices 716# Character devices
681# 717#
682# CONFIG_AD9960 is not set
683CONFIG_BFIN_DMA_INTERFACE=m 718CONFIG_BFIN_DMA_INTERFACE=m
684# CONFIG_BFIN_PPI is not set 719# CONFIG_BFIN_PPI is not set
685# CONFIG_BFIN_PPIFCD is not set 720# CONFIG_BFIN_PPIFCD is not set
686# CONFIG_BFIN_SIMPLE_TIMER is not set 721# CONFIG_BFIN_SIMPLE_TIMER is not set
687# CONFIG_BFIN_SPI_ADC is not set 722# CONFIG_BFIN_SPI_ADC is not set
688CONFIG_BFIN_SPORT=y 723CONFIG_BFIN_SPORT=y
689# CONFIG_BFIN_TIMER_LATENCY is not set
690CONFIG_SIMPLE_GPIO=m
691# CONFIG_VT is not set 724# CONFIG_VT is not set
692# CONFIG_DEVKMEM is not set 725# CONFIG_DEVKMEM is not set
693CONFIG_BFIN_JTAG_COMM=m 726CONFIG_BFIN_JTAG_COMM=m
@@ -701,6 +734,7 @@ CONFIG_BFIN_JTAG_COMM=m
701# 734#
702# Non-8250 serial port support 735# Non-8250 serial port support
703# 736#
737# CONFIG_SERIAL_MAX3100 is not set
704CONFIG_SERIAL_BFIN=y 738CONFIG_SERIAL_BFIN=y
705CONFIG_SERIAL_BFIN_CONSOLE=y 739CONFIG_SERIAL_BFIN_CONSOLE=y
706CONFIG_SERIAL_BFIN_DMA=y 740CONFIG_SERIAL_BFIN_DMA=y
@@ -711,12 +745,8 @@ CONFIG_SERIAL_CORE=y
711CONFIG_SERIAL_CORE_CONSOLE=y 745CONFIG_SERIAL_CORE_CONSOLE=y
712# CONFIG_SERIAL_BFIN_SPORT is not set 746# CONFIG_SERIAL_BFIN_SPORT is not set
713CONFIG_UNIX98_PTYS=y 747CONFIG_UNIX98_PTYS=y
748# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
714# CONFIG_LEGACY_PTYS is not set 749# CONFIG_LEGACY_PTYS is not set
715
716#
717# CAN, the car bus and industrial fieldbus
718#
719# CONFIG_CAN4LINUX is not set
720# CONFIG_IPMI_HANDLER is not set 750# CONFIG_IPMI_HANDLER is not set
721# CONFIG_HW_RANDOM is not set 751# CONFIG_HW_RANDOM is not set
722# CONFIG_R3964 is not set 752# CONFIG_R3964 is not set
@@ -734,13 +764,18 @@ CONFIG_SPI_BFIN=y
734# CONFIG_SPI_BFIN_LOCK is not set 764# CONFIG_SPI_BFIN_LOCK is not set
735# CONFIG_SPI_BFIN_SPORT is not set 765# CONFIG_SPI_BFIN_SPORT is not set
736# CONFIG_SPI_BITBANG is not set 766# CONFIG_SPI_BITBANG is not set
767# CONFIG_SPI_GPIO is not set
737 768
738# 769#
739# SPI Protocol Masters 770# SPI Protocol Masters
740# 771#
741# CONFIG_EEPROM_AT25 is not set
742# CONFIG_SPI_SPIDEV is not set 772# CONFIG_SPI_SPIDEV is not set
743# CONFIG_SPI_TLE62X0 is not set 773# CONFIG_SPI_TLE62X0 is not set
774
775#
776# PPS support
777#
778# CONFIG_PPS is not set
744CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 779CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
745CONFIG_GPIOLIB=y 780CONFIG_GPIOLIB=y
746# CONFIG_DEBUG_GPIO is not set 781# CONFIG_DEBUG_GPIO is not set
@@ -753,9 +788,6 @@ CONFIG_GPIO_SYSFS=y
753# 788#
754# I2C GPIO expanders: 789# I2C GPIO expanders:
755# 790#
756# CONFIG_GPIO_MAX732X is not set
757# CONFIG_GPIO_PCA953X is not set
758# CONFIG_GPIO_PCF857X is not set
759 791
760# 792#
761# PCI GPIO expanders: 793# PCI GPIO expanders:
@@ -766,11 +798,15 @@ CONFIG_GPIO_SYSFS=y
766# 798#
767# CONFIG_GPIO_MAX7301 is not set 799# CONFIG_GPIO_MAX7301 is not set
768# CONFIG_GPIO_MCP23S08 is not set 800# CONFIG_GPIO_MCP23S08 is not set
801# CONFIG_GPIO_MC33880 is not set
802
803#
804# AC97 GPIO expanders:
805#
769# CONFIG_W1 is not set 806# CONFIG_W1 is not set
770# CONFIG_POWER_SUPPLY is not set 807# CONFIG_POWER_SUPPLY is not set
771# CONFIG_HWMON is not set 808# CONFIG_HWMON is not set
772# CONFIG_THERMAL is not set 809# CONFIG_THERMAL is not set
773# CONFIG_THERMAL_HWMON is not set
774CONFIG_WATCHDOG=y 810CONFIG_WATCHDOG=y
775# CONFIG_WATCHDOG_NOWAYOUT is not set 811# CONFIG_WATCHDOG_NOWAYOUT is not set
776 812
@@ -793,23 +829,10 @@ CONFIG_SSB_POSSIBLE=y
793# CONFIG_MFD_SM501 is not set 829# CONFIG_MFD_SM501 is not set
794# CONFIG_HTC_PASIC3 is not set 830# CONFIG_HTC_PASIC3 is not set
795# CONFIG_MFD_TMIO is not set 831# CONFIG_MFD_TMIO is not set
832# CONFIG_MFD_MC13783 is not set
833# CONFIG_EZX_PCAP is not set
796# CONFIG_REGULATOR is not set 834# CONFIG_REGULATOR is not set
797 835# CONFIG_MEDIA_SUPPORT is not set
798#
799# Multimedia devices
800#
801
802#
803# Multimedia core support
804#
805# CONFIG_VIDEO_DEV is not set
806# CONFIG_DVB_CORE is not set
807# CONFIG_VIDEO_MEDIA is not set
808
809#
810# Multimedia drivers
811#
812# CONFIG_DAB is not set
813 836
814# 837#
815# Graphics support 838# Graphics support
@@ -826,14 +849,12 @@ CONFIG_SSB_POSSIBLE=y
826# CONFIG_SOUND is not set 849# CONFIG_SOUND is not set
827CONFIG_HID_SUPPORT=y 850CONFIG_HID_SUPPORT=y
828CONFIG_HID=m 851CONFIG_HID=m
829# CONFIG_HID_DEBUG is not set
830# CONFIG_HIDRAW is not set 852# CONFIG_HIDRAW is not set
831# CONFIG_HID_PID is not set 853# CONFIG_HID_PID is not set
832 854
833# 855#
834# Special HID drivers 856# Special HID drivers
835# 857#
836CONFIG_HID_COMPAT=y
837# CONFIG_USB_SUPPORT is not set 858# CONFIG_USB_SUPPORT is not set
838# CONFIG_MMC is not set 859# CONFIG_MMC is not set
839# CONFIG_MEMSTICK is not set 860# CONFIG_MEMSTICK is not set
@@ -864,6 +885,7 @@ CONFIG_RTC_INTF_DEV=y
864# CONFIG_RTC_DRV_R9701 is not set 885# CONFIG_RTC_DRV_R9701 is not set
865# CONFIG_RTC_DRV_RS5C348 is not set 886# CONFIG_RTC_DRV_RS5C348 is not set
866# CONFIG_RTC_DRV_DS3234 is not set 887# CONFIG_RTC_DRV_DS3234 is not set
888# CONFIG_RTC_DRV_PCF2123 is not set
867 889
868# 890#
869# Platform RTC drivers 891# Platform RTC drivers
@@ -884,10 +906,20 @@ CONFIG_RTC_INTF_DEV=y
884# 906#
885CONFIG_RTC_DRV_BFIN=y 907CONFIG_RTC_DRV_BFIN=y
886# CONFIG_DMADEVICES is not set 908# CONFIG_DMADEVICES is not set
909# CONFIG_AUXDISPLAY is not set
887# CONFIG_UIO is not set 910# CONFIG_UIO is not set
911
912#
913# TI VLYNQ
914#
888# CONFIG_STAGING is not set 915# CONFIG_STAGING is not set
889 916
890# 917#
918# Firmware Drivers
919#
920# CONFIG_FIRMWARE_MEMMAP is not set
921
922#
891# File systems 923# File systems
892# 924#
893# CONFIG_EXT2_FS is not set 925# CONFIG_EXT2_FS is not set
@@ -896,9 +928,13 @@ CONFIG_RTC_DRV_BFIN=y
896# CONFIG_REISERFS_FS is not set 928# CONFIG_REISERFS_FS is not set
897# CONFIG_JFS_FS is not set 929# CONFIG_JFS_FS is not set
898# CONFIG_FS_POSIX_ACL is not set 930# CONFIG_FS_POSIX_ACL is not set
899CONFIG_FILE_LOCKING=y
900# CONFIG_XFS_FS is not set 931# CONFIG_XFS_FS is not set
932# CONFIG_GFS2_FS is not set
901# CONFIG_OCFS2_FS is not set 933# CONFIG_OCFS2_FS is not set
934# CONFIG_BTRFS_FS is not set
935# CONFIG_NILFS2_FS is not set
936CONFIG_FILE_LOCKING=y
937CONFIG_FSNOTIFY=y
902# CONFIG_DNOTIFY is not set 938# CONFIG_DNOTIFY is not set
903CONFIG_INOTIFY=y 939CONFIG_INOTIFY=y
904CONFIG_INOTIFY_USER=y 940CONFIG_INOTIFY_USER=y
@@ -908,6 +944,11 @@ CONFIG_INOTIFY_USER=y
908# CONFIG_FUSE_FS is not set 944# CONFIG_FUSE_FS is not set
909 945
910# 946#
947# Caches
948#
949# CONFIG_FSCACHE is not set
950
951#
911# CD-ROM/DVD Filesystems 952# CD-ROM/DVD Filesystems
912# 953#
913# CONFIG_ISO9660_FS is not set 954# CONFIG_ISO9660_FS is not set
@@ -926,13 +967,9 @@ CONFIG_INOTIFY_USER=y
926CONFIG_PROC_FS=y 967CONFIG_PROC_FS=y
927CONFIG_PROC_SYSCTL=y 968CONFIG_PROC_SYSCTL=y
928CONFIG_SYSFS=y 969CONFIG_SYSFS=y
929# CONFIG_TMPFS is not set
930# CONFIG_HUGETLB_PAGE is not set 970# CONFIG_HUGETLB_PAGE is not set
931# CONFIG_CONFIGFS_FS is not set 971# CONFIG_CONFIGFS_FS is not set
932 972CONFIG_MISC_FILESYSTEMS=y
933#
934# Miscellaneous filesystems
935#
936# CONFIG_ADFS_FS is not set 973# CONFIG_ADFS_FS is not set
937# CONFIG_AFFS_FS is not set 974# CONFIG_AFFS_FS is not set
938# CONFIG_HFS_FS is not set 975# CONFIG_HFS_FS is not set
@@ -951,17 +988,8 @@ CONFIG_JFFS2_ZLIB=y
951# CONFIG_JFFS2_LZO is not set 988# CONFIG_JFFS2_LZO is not set
952CONFIG_JFFS2_RTIME=y 989CONFIG_JFFS2_RTIME=y
953# CONFIG_JFFS2_RUBIN is not set 990# CONFIG_JFFS2_RUBIN is not set
954CONFIG_YAFFS_FS=m
955CONFIG_YAFFS_YAFFS1=y
956# CONFIG_YAFFS_9BYTE_TAGS is not set
957# CONFIG_YAFFS_DOES_ECC is not set
958CONFIG_YAFFS_YAFFS2=y
959CONFIG_YAFFS_AUTO_YAFFS2=y
960# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
961# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
962# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
963CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
964# CONFIG_CRAMFS is not set 991# CONFIG_CRAMFS is not set
992# CONFIG_SQUASHFS is not set
965# CONFIG_VXFS_FS is not set 993# CONFIG_VXFS_FS is not set
966# CONFIG_MINIX_FS is not set 994# CONFIG_MINIX_FS is not set
967# CONFIG_OMFS_FS is not set 995# CONFIG_OMFS_FS is not set
@@ -980,7 +1008,6 @@ CONFIG_LOCKD=m
980CONFIG_LOCKD_V4=y 1008CONFIG_LOCKD_V4=y
981CONFIG_NFS_COMMON=y 1009CONFIG_NFS_COMMON=y
982CONFIG_SUNRPC=m 1010CONFIG_SUNRPC=m
983# CONFIG_SUNRPC_REGISTER_V4 is not set
984# CONFIG_RPCSEC_GSS_KRB5 is not set 1011# CONFIG_RPCSEC_GSS_KRB5 is not set
985# CONFIG_RPCSEC_GSS_SPKM3 is not set 1012# CONFIG_RPCSEC_GSS_SPKM3 is not set
986CONFIG_SMB_FS=m 1013CONFIG_SMB_FS=m
@@ -1045,14 +1072,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1045CONFIG_ENABLE_MUST_CHECK=y 1072CONFIG_ENABLE_MUST_CHECK=y
1046CONFIG_FRAME_WARN=1024 1073CONFIG_FRAME_WARN=1024
1047# CONFIG_MAGIC_SYSRQ is not set 1074# CONFIG_MAGIC_SYSRQ is not set
1075# CONFIG_STRIP_ASM_SYMS is not set
1048# CONFIG_UNUSED_SYMBOLS is not set 1076# CONFIG_UNUSED_SYMBOLS is not set
1049CONFIG_DEBUG_FS=y 1077CONFIG_DEBUG_FS=y
1050# CONFIG_HEADERS_CHECK is not set 1078# CONFIG_HEADERS_CHECK is not set
1079CONFIG_DEBUG_SECTION_MISMATCH=y
1051CONFIG_DEBUG_KERNEL=y 1080CONFIG_DEBUG_KERNEL=y
1052CONFIG_DEBUG_SHIRQ=y 1081CONFIG_DEBUG_SHIRQ=y
1053CONFIG_DETECT_SOFTLOCKUP=y 1082CONFIG_DETECT_SOFTLOCKUP=y
1054# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1083# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1055CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1084CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1085CONFIG_DETECT_HUNG_TASK=y
1086# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1087CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1056CONFIG_SCHED_DEBUG=y 1088CONFIG_SCHED_DEBUG=y
1057# CONFIG_SCHEDSTATS is not set 1089# CONFIG_SCHEDSTATS is not set
1058# CONFIG_TIMER_STATS is not set 1090# CONFIG_TIMER_STATS is not set
@@ -1060,31 +1092,39 @@ CONFIG_SCHED_DEBUG=y
1060# CONFIG_DEBUG_SLAB is not set 1092# CONFIG_DEBUG_SLAB is not set
1061# CONFIG_DEBUG_SPINLOCK is not set 1093# CONFIG_DEBUG_SPINLOCK is not set
1062# CONFIG_DEBUG_MUTEXES is not set 1094# CONFIG_DEBUG_MUTEXES is not set
1095# CONFIG_DEBUG_LOCK_ALLOC is not set
1096# CONFIG_PROVE_LOCKING is not set
1097# CONFIG_LOCK_STAT is not set
1063# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1098# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1064# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1099# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1065# CONFIG_DEBUG_KOBJECT is not set 1100# CONFIG_DEBUG_KOBJECT is not set
1066CONFIG_DEBUG_BUGVERBOSE=y 1101CONFIG_DEBUG_BUGVERBOSE=y
1067CONFIG_DEBUG_INFO=y 1102CONFIG_DEBUG_INFO=y
1068# CONFIG_DEBUG_VM is not set 1103# CONFIG_DEBUG_VM is not set
1104# CONFIG_DEBUG_NOMMU_REGIONS is not set
1069# CONFIG_DEBUG_WRITECOUNT is not set 1105# CONFIG_DEBUG_WRITECOUNT is not set
1070# CONFIG_DEBUG_MEMORY_INIT is not set 1106# CONFIG_DEBUG_MEMORY_INIT is not set
1071# CONFIG_DEBUG_LIST is not set 1107# CONFIG_DEBUG_LIST is not set
1072# CONFIG_DEBUG_SG is not set 1108# CONFIG_DEBUG_SG is not set
1109# CONFIG_DEBUG_NOTIFIERS is not set
1110# CONFIG_DEBUG_CREDENTIALS is not set
1073# CONFIG_FRAME_POINTER is not set 1111# CONFIG_FRAME_POINTER is not set
1074# CONFIG_BOOT_PRINTK_DELAY is not set 1112# CONFIG_BOOT_PRINTK_DELAY is not set
1075# CONFIG_RCU_TORTURE_TEST is not set 1113# CONFIG_RCU_TORTURE_TEST is not set
1076# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1114# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1077# CONFIG_BACKTRACE_SELF_TEST is not set 1115# CONFIG_BACKTRACE_SELF_TEST is not set
1078# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1116# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1117# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1079# CONFIG_FAULT_INJECTION is not set 1118# CONFIG_FAULT_INJECTION is not set
1080 1119# CONFIG_PAGE_POISONING is not set
1081# 1120CONFIG_HAVE_FUNCTION_TRACER=y
1082# Tracers 1121CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1083# 1122CONFIG_TRACING_SUPPORT=y
1084# CONFIG_SCHED_TRACER is not set 1123# CONFIG_FTRACE is not set
1085# CONFIG_CONTEXT_SWITCH_TRACER is not set 1124# CONFIG_BRANCH_PROFILE_NONE is not set
1086# CONFIG_BOOT_TRACER is not set 1125# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1087# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1126# CONFIG_PROFILE_ALL_BRANCHES is not set
1127# CONFIG_DYNAMIC_DEBUG is not set
1088# CONFIG_SAMPLES is not set 1128# CONFIG_SAMPLES is not set
1089CONFIG_HAVE_ARCH_KGDB=y 1129CONFIG_HAVE_ARCH_KGDB=y
1090# CONFIG_KGDB is not set 1130# CONFIG_KGDB is not set
@@ -1109,6 +1149,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1109CONFIG_EARLY_PRINTK=y 1149CONFIG_EARLY_PRINTK=y
1110CONFIG_CPLB_INFO=y 1150CONFIG_CPLB_INFO=y
1111CONFIG_ACCESS_CHECK=y 1151CONFIG_ACCESS_CHECK=y
1152# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1112 1153
1113# 1154#
1114# Security options 1155# Security options
@@ -1117,14 +1158,14 @@ CONFIG_ACCESS_CHECK=y
1117CONFIG_SECURITY=y 1158CONFIG_SECURITY=y
1118# CONFIG_SECURITYFS is not set 1159# CONFIG_SECURITYFS is not set
1119# CONFIG_SECURITY_NETWORK is not set 1160# CONFIG_SECURITY_NETWORK is not set
1161# CONFIG_SECURITY_PATH is not set
1120# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1162# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1121CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1163# CONFIG_SECURITY_TOMOYO is not set
1122CONFIG_CRYPTO=y 1164CONFIG_CRYPTO=y
1123 1165
1124# 1166#
1125# Crypto core or helper 1167# Crypto core or helper
1126# 1168#
1127# CONFIG_CRYPTO_FIPS is not set
1128# CONFIG_CRYPTO_MANAGER is not set 1169# CONFIG_CRYPTO_MANAGER is not set
1129# CONFIG_CRYPTO_MANAGER2 is not set 1170# CONFIG_CRYPTO_MANAGER2 is not set
1130# CONFIG_CRYPTO_GF128MUL is not set 1171# CONFIG_CRYPTO_GF128MUL is not set
@@ -1156,11 +1197,13 @@ CONFIG_CRYPTO=y
1156# 1197#
1157# CONFIG_CRYPTO_HMAC is not set 1198# CONFIG_CRYPTO_HMAC is not set
1158# CONFIG_CRYPTO_XCBC is not set 1199# CONFIG_CRYPTO_XCBC is not set
1200# CONFIG_CRYPTO_VMAC is not set
1159 1201
1160# 1202#
1161# Digest 1203# Digest
1162# 1204#
1163# CONFIG_CRYPTO_CRC32C is not set 1205# CONFIG_CRYPTO_CRC32C is not set
1206# CONFIG_CRYPTO_GHASH is not set
1164# CONFIG_CRYPTO_MD4 is not set 1207# CONFIG_CRYPTO_MD4 is not set
1165# CONFIG_CRYPTO_MD5 is not set 1208# CONFIG_CRYPTO_MD5 is not set
1166# CONFIG_CRYPTO_MICHAEL_MIC is not set 1209# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1197,6 +1240,7 @@ CONFIG_CRYPTO=y
1197# Compression 1240# Compression
1198# 1241#
1199# CONFIG_CRYPTO_DEFLATE is not set 1242# CONFIG_CRYPTO_DEFLATE is not set
1243# CONFIG_CRYPTO_ZLIB is not set
1200# CONFIG_CRYPTO_LZO is not set 1244# CONFIG_CRYPTO_LZO is not set
1201 1245
1202# 1246#
@@ -1204,11 +1248,13 @@ CONFIG_CRYPTO=y
1204# 1248#
1205# CONFIG_CRYPTO_ANSI_CPRNG is not set 1249# CONFIG_CRYPTO_ANSI_CPRNG is not set
1206CONFIG_CRYPTO_HW=y 1250CONFIG_CRYPTO_HW=y
1251# CONFIG_BINARY_PRINTF is not set
1207 1252
1208# 1253#
1209# Library routines 1254# Library routines
1210# 1255#
1211CONFIG_BITREVERSE=y 1256CONFIG_BITREVERSE=y
1257CONFIG_GENERIC_FIND_LAST_BIT=y
1212CONFIG_CRC_CCITT=m 1258CONFIG_CRC_CCITT=m
1213# CONFIG_CRC16 is not set 1259# CONFIG_CRC16 is not set
1214# CONFIG_CRC_T10DIF is not set 1260# CONFIG_CRC_T10DIF is not set
@@ -1218,6 +1264,8 @@ CONFIG_CRC32=y
1218# CONFIG_LIBCRC32C is not set 1264# CONFIG_LIBCRC32C is not set
1219CONFIG_ZLIB_INFLATE=y 1265CONFIG_ZLIB_INFLATE=y
1220CONFIG_ZLIB_DEFLATE=m 1266CONFIG_ZLIB_DEFLATE=m
1267CONFIG_DECOMPRESS_GZIP=y
1221CONFIG_HAS_IOMEM=y 1268CONFIG_HAS_IOMEM=y
1222CONFIG_HAS_IOPORT=y 1269CONFIG_HAS_IOPORT=y
1223CONFIG_HAS_DMA=y 1270CONFIG_HAS_DMA=y
1271CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 6c60c8286318..c3fe6e5b612f 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102# CONFIG_DEFAULT_NOOP is not set 131# CONFIG_DEFAULT_NOOP is not set
103CONFIG_DEFAULT_IOSCHED="anticipatory" 132CONFIG_DEFAULT_IOSCHED="anticipatory"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_BF533=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=3 174CONFIG_BF_REV_MIN=3
@@ -228,7 +256,7 @@ CONFIG_GENERIC_TIME=y
228CONFIG_GENERIC_CLOCKEVENTS=y 256CONFIG_GENERIC_CLOCKEVENTS=y
229# CONFIG_TICKSOURCE_GPTMR0 is not set 257# CONFIG_TICKSOURCE_GPTMR0 is not set
230CONFIG_TICKSOURCE_CORETMR=y 258CONFIG_TICKSOURCE_CORETMR=y
231# CONFIG_CYCLES_CLOCKSOURCE is not set 259CONFIG_CYCLES_CLOCKSOURCE=y
232# CONFIG_GPTMR0_CLOCKSOURCE is not set 260# CONFIG_GPTMR0_CLOCKSOURCE is not set
233CONFIG_TICK_ONESHOT=y 261CONFIG_TICK_ONESHOT=y
234# CONFIG_NO_HZ is not set 262# CONFIG_NO_HZ is not set
@@ -280,7 +308,6 @@ CONFIG_FLATMEM=y
280CONFIG_FLAT_NODE_MEM_MAP=y 308CONFIG_FLAT_NODE_MEM_MAP=y
281CONFIG_PAGEFLAGS_EXTENDED=y 309CONFIG_PAGEFLAGS_EXTENDED=y
282CONFIG_SPLIT_PTLOCK_CPUS=4 310CONFIG_SPLIT_PTLOCK_CPUS=4
283# CONFIG_RESOURCES_64BIT is not set
284# CONFIG_PHYS_ADDR_T_64BIT is not set 311# CONFIG_PHYS_ADDR_T_64BIT is not set
285CONFIG_ZONE_DMA_FLAG=1 312CONFIG_ZONE_DMA_FLAG=1
286CONFIG_VIRT_TO_BUS=y 313CONFIG_VIRT_TO_BUS=y
@@ -289,16 +316,18 @@ CONFIG_BFIN_GPTIMERS=m
289# CONFIG_DMA_UNCACHED_4M is not set 316# CONFIG_DMA_UNCACHED_4M is not set
290# CONFIG_DMA_UNCACHED_2M is not set 317# CONFIG_DMA_UNCACHED_2M is not set
291CONFIG_DMA_UNCACHED_1M=y 318CONFIG_DMA_UNCACHED_1M=y
319# CONFIG_DMA_UNCACHED_512K is not set
320# CONFIG_DMA_UNCACHED_256K is not set
321# CONFIG_DMA_UNCACHED_128K is not set
292# CONFIG_DMA_UNCACHED_NONE is not set 322# CONFIG_DMA_UNCACHED_NONE is not set
293 323
294# 324#
295# Cache Support 325# Cache Support
296# 326#
297CONFIG_BFIN_ICACHE=y 327CONFIG_BFIN_ICACHE=y
298# CONFIG_BFIN_ICACHE_LOCK is not set 328CONFIG_BFIN_EXTMEM_ICACHEABLE=y
299CONFIG_BFIN_DCACHE=y 329CONFIG_BFIN_DCACHE=y
300# CONFIG_BFIN_DCACHE_BANKA is not set 330# CONFIG_BFIN_DCACHE_BANKA is not set
301CONFIG_BFIN_EXTMEM_ICACHEABLE=y
302CONFIG_BFIN_EXTMEM_DCACHEABLE=y 331CONFIG_BFIN_EXTMEM_DCACHEABLE=y
303CONFIG_BFIN_EXTMEM_WRITEBACK=y 332CONFIG_BFIN_EXTMEM_WRITEBACK=y
304# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 333# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -309,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
309# CONFIG_MPU is not set 338# CONFIG_MPU is not set
310 339
311# 340#
312# Asynchonous Memory Configuration 341# Asynchronous Memory Configuration
313# 342#
314 343
315# 344#
@@ -355,6 +384,7 @@ CONFIG_PM=y
355CONFIG_PM_SLEEP=y 384CONFIG_PM_SLEEP=y
356CONFIG_SUSPEND=y 385CONFIG_SUSPEND=y
357CONFIG_SUSPEND_FREEZER=y 386CONFIG_SUSPEND_FREEZER=y
387# CONFIG_PM_RUNTIME is not set
358CONFIG_ARCH_SUSPEND_POSSIBLE=y 388CONFIG_ARCH_SUSPEND_POSSIBLE=y
359CONFIG_PM_BFIN_SLEEP_DEEPER=y 389CONFIG_PM_BFIN_SLEEP_DEEPER=y
360# CONFIG_PM_BFIN_SLEEP is not set 390# CONFIG_PM_BFIN_SLEEP is not set
@@ -376,11 +406,6 @@ CONFIG_NET=y
376CONFIG_PACKET=y 406CONFIG_PACKET=y
377# CONFIG_PACKET_MMAP is not set 407# CONFIG_PACKET_MMAP is not set
378CONFIG_UNIX=y 408CONFIG_UNIX=y
379CONFIG_XFRM=y
380# CONFIG_XFRM_USER is not set
381# CONFIG_XFRM_SUB_POLICY is not set
382# CONFIG_XFRM_MIGRATE is not set
383# CONFIG_XFRM_STATISTICS is not set
384# CONFIG_NET_KEY is not set 409# CONFIG_NET_KEY is not set
385CONFIG_INET=y 410CONFIG_INET=y
386# CONFIG_IP_MULTICAST is not set 411# CONFIG_IP_MULTICAST is not set
@@ -404,7 +429,6 @@ CONFIG_IP_PNP=y
404# CONFIG_INET_XFRM_MODE_BEET is not set 429# CONFIG_INET_XFRM_MODE_BEET is not set
405# CONFIG_INET_LRO is not set 430# CONFIG_INET_LRO is not set
406# CONFIG_INET_DIAG is not set 431# CONFIG_INET_DIAG is not set
407CONFIG_INET_TCP_DIAG=y
408# CONFIG_TCP_CONG_ADVANCED is not set 432# CONFIG_TCP_CONG_ADVANCED is not set
409CONFIG_TCP_CONG_CUBIC=y 433CONFIG_TCP_CONG_CUBIC=y
410CONFIG_DEFAULT_TCP_CONG="cubic" 434CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -415,6 +439,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
415# CONFIG_NETFILTER is not set 439# CONFIG_NETFILTER is not set
416# CONFIG_IP_DCCP is not set 440# CONFIG_IP_DCCP is not set
417# CONFIG_IP_SCTP is not set 441# CONFIG_IP_SCTP is not set
442# CONFIG_RDS is not set
418# CONFIG_TIPC is not set 443# CONFIG_TIPC is not set
419# CONFIG_ATM is not set 444# CONFIG_ATM is not set
420# CONFIG_BRIDGE is not set 445# CONFIG_BRIDGE is not set
@@ -428,7 +453,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
428# CONFIG_LAPB is not set 453# CONFIG_LAPB is not set
429# CONFIG_ECONET is not set 454# CONFIG_ECONET is not set
430# CONFIG_WAN_ROUTER is not set 455# CONFIG_WAN_ROUTER is not set
456# CONFIG_PHONET is not set
457# CONFIG_IEEE802154 is not set
431# CONFIG_NET_SCHED is not set 458# CONFIG_NET_SCHED is not set
459# CONFIG_DCB is not set
432 460
433# 461#
434# Network testing 462# Network testing
@@ -474,13 +502,8 @@ CONFIG_SIR_BFIN_DMA=y
474# 502#
475# CONFIG_BT is not set 503# CONFIG_BT is not set
476# CONFIG_AF_RXRPC is not set 504# CONFIG_AF_RXRPC is not set
477# CONFIG_PHONET is not set 505# CONFIG_WIRELESS is not set
478CONFIG_WIRELESS=y 506# CONFIG_WIMAX is not set
479# CONFIG_CFG80211 is not set
480CONFIG_WIRELESS_OLD_REGULATORY=y
481# CONFIG_WIRELESS_EXT is not set
482# CONFIG_MAC80211 is not set
483# CONFIG_IEEE80211 is not set
484# CONFIG_RFKILL is not set 507# CONFIG_RFKILL is not set
485# CONFIG_NET_9P is not set 508# CONFIG_NET_9P is not set
486 509
@@ -501,6 +524,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
501# CONFIG_CONNECTOR is not set 524# CONFIG_CONNECTOR is not set
502CONFIG_MTD=y 525CONFIG_MTD=y
503# CONFIG_MTD_DEBUG is not set 526# CONFIG_MTD_DEBUG is not set
527# CONFIG_MTD_TESTS is not set
504# CONFIG_MTD_CONCAT is not set 528# CONFIG_MTD_CONCAT is not set
505CONFIG_MTD_PARTITIONS=y 529CONFIG_MTD_PARTITIONS=y
506# CONFIG_MTD_REDBOOT_PARTS is not set 530# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -560,6 +584,7 @@ CONFIG_MTD_BFIN_ASYNC=m
560# 584#
561# CONFIG_MTD_DATAFLASH is not set 585# CONFIG_MTD_DATAFLASH is not set
562# CONFIG_MTD_M25P80 is not set 586# CONFIG_MTD_M25P80 is not set
587# CONFIG_MTD_SST25L is not set
563# CONFIG_MTD_SLRAM is not set 588# CONFIG_MTD_SLRAM is not set
564# CONFIG_MTD_PHRAM is not set 589# CONFIG_MTD_PHRAM is not set
565# CONFIG_MTD_MTDRAM is not set 590# CONFIG_MTD_MTDRAM is not set
@@ -575,6 +600,11 @@ CONFIG_MTD_BFIN_ASYNC=m
575# CONFIG_MTD_ONENAND is not set 600# CONFIG_MTD_ONENAND is not set
576 601
577# 602#
603# LPDDR flash memory drivers
604#
605# CONFIG_MTD_LPDDR is not set
606
607#
578# UBI - Unsorted block images 608# UBI - Unsorted block images
579# 609#
580# CONFIG_MTD_UBI is not set 610# CONFIG_MTD_UBI is not set
@@ -591,10 +621,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
591# CONFIG_ATA_OVER_ETH is not set 621# CONFIG_ATA_OVER_ETH is not set
592# CONFIG_BLK_DEV_HD is not set 622# CONFIG_BLK_DEV_HD is not set
593CONFIG_MISC_DEVICES=y 623CONFIG_MISC_DEVICES=y
594# CONFIG_EEPROM_93CX6 is not set 624# CONFIG_AD525X_DPOT is not set
595# CONFIG_ICS932S401 is not set 625# CONFIG_ICS932S401 is not set
596# CONFIG_ENCLOSURE_SERVICES is not set 626# CONFIG_ENCLOSURE_SERVICES is not set
627# CONFIG_ISL29003 is not set
597# CONFIG_C2PORT is not set 628# CONFIG_C2PORT is not set
629
630#
631# EEPROM support
632#
633# CONFIG_EEPROM_AT24 is not set
634# CONFIG_EEPROM_AT25 is not set
635# CONFIG_EEPROM_LEGACY is not set
636# CONFIG_EEPROM_MAX6875 is not set
637# CONFIG_EEPROM_93CX6 is not set
598CONFIG_HAVE_IDE=y 638CONFIG_HAVE_IDE=y
599# CONFIG_IDE is not set 639# CONFIG_IDE is not set
600 640
@@ -618,9 +658,12 @@ CONFIG_NETDEVICES=y
618CONFIG_NET_ETHERNET=y 658CONFIG_NET_ETHERNET=y
619CONFIG_MII=y 659CONFIG_MII=y
620CONFIG_SMC91X=y 660CONFIG_SMC91X=y
621# CONFIG_SMSC911X is not set
622# CONFIG_DM9000 is not set 661# CONFIG_DM9000 is not set
623# CONFIG_ENC28J60 is not set 662# CONFIG_ENC28J60 is not set
663# CONFIG_ETHOC is not set
664# CONFIG_SMSC911X is not set
665# CONFIG_DNET is not set
666# CONFIG_ADF702X is not set
624# CONFIG_IBM_NEW_EMAC_ZMII is not set 667# CONFIG_IBM_NEW_EMAC_ZMII is not set
625# CONFIG_IBM_NEW_EMAC_RGMII is not set 668# CONFIG_IBM_NEW_EMAC_RGMII is not set
626# CONFIG_IBM_NEW_EMAC_TAH is not set 669# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -629,15 +672,16 @@ CONFIG_SMC91X=y
629# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 672# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
630# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 673# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
631# CONFIG_B44 is not set 674# CONFIG_B44 is not set
675# CONFIG_KS8842 is not set
676# CONFIG_KS8851 is not set
677# CONFIG_KS8851_MLL is not set
632# CONFIG_NETDEV_1000 is not set 678# CONFIG_NETDEV_1000 is not set
633# CONFIG_NETDEV_10000 is not set 679# CONFIG_NETDEV_10000 is not set
680# CONFIG_WLAN is not set
634 681
635# 682#
636# Wireless LAN 683# Enable WiMAX (Networking options) to see the WiMAX drivers
637# 684#
638# CONFIG_WLAN_PRE80211 is not set
639# CONFIG_WLAN_80211 is not set
640# CONFIG_IWLWIFI_LEDS is not set
641# CONFIG_WAN is not set 685# CONFIG_WAN is not set
642# CONFIG_PPP is not set 686# CONFIG_PPP is not set
643# CONFIG_SLIP is not set 687# CONFIG_SLIP is not set
@@ -672,7 +716,10 @@ CONFIG_INPUT_EVDEV=m
672# CONFIG_INPUT_TOUCHSCREEN is not set 716# CONFIG_INPUT_TOUCHSCREEN is not set
673CONFIG_INPUT_MISC=y 717CONFIG_INPUT_MISC=y
674# CONFIG_INPUT_UINPUT is not set 718# CONFIG_INPUT_UINPUT is not set
675CONFIG_CONFIG_INPUT_PCF8574=m 719# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
720# CONFIG_INPUT_AD714X is not set
721# CONFIG_INPUT_ADXL34X is not set
722# CONFIG_INPUT_PCF8574 is not set
676 723
677# 724#
678# Hardware I/O ports 725# Hardware I/O ports
@@ -683,16 +730,13 @@ CONFIG_CONFIG_INPUT_PCF8574=m
683# 730#
684# Character devices 731# Character devices
685# 732#
686# CONFIG_AD9960 is not set
687CONFIG_BFIN_DMA_INTERFACE=m 733CONFIG_BFIN_DMA_INTERFACE=m
688# CONFIG_BFIN_PPI is not set 734# CONFIG_BFIN_PPI is not set
689# CONFIG_BFIN_PPIFCD is not set 735# CONFIG_BFIN_PPIFCD is not set
690# CONFIG_BFIN_SIMPLE_TIMER is not set 736# CONFIG_BFIN_SIMPLE_TIMER is not set
691# CONFIG_BFIN_SPI_ADC is not set 737# CONFIG_BFIN_SPI_ADC is not set
692CONFIG_BFIN_SPORT=m 738CONFIG_BFIN_SPORT=m
693# CONFIG_BFIN_TIMER_LATENCY is not set
694# CONFIG_BFIN_TWI_LCD is not set 739# CONFIG_BFIN_TWI_LCD is not set
695CONFIG_SIMPLE_GPIO=m
696# CONFIG_VT is not set 740# CONFIG_VT is not set
697# CONFIG_DEVKMEM is not set 741# CONFIG_DEVKMEM is not set
698CONFIG_BFIN_JTAG_COMM=m 742CONFIG_BFIN_JTAG_COMM=m
@@ -706,6 +750,7 @@ CONFIG_BFIN_JTAG_COMM=m
706# 750#
707# Non-8250 serial port support 751# Non-8250 serial port support
708# 752#
753# CONFIG_SERIAL_MAX3100 is not set
709CONFIG_SERIAL_BFIN=y 754CONFIG_SERIAL_BFIN=y
710CONFIG_SERIAL_BFIN_CONSOLE=y 755CONFIG_SERIAL_BFIN_CONSOLE=y
711CONFIG_SERIAL_BFIN_DMA=y 756CONFIG_SERIAL_BFIN_DMA=y
@@ -716,12 +761,8 @@ CONFIG_SERIAL_CORE=y
716CONFIG_SERIAL_CORE_CONSOLE=y 761CONFIG_SERIAL_CORE_CONSOLE=y
717# CONFIG_SERIAL_BFIN_SPORT is not set 762# CONFIG_SERIAL_BFIN_SPORT is not set
718CONFIG_UNIX98_PTYS=y 763CONFIG_UNIX98_PTYS=y
764# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
719# CONFIG_LEGACY_PTYS is not set 765# CONFIG_LEGACY_PTYS is not set
720
721#
722# CAN, the car bus and industrial fieldbus
723#
724# CONFIG_CAN4LINUX is not set
725# CONFIG_IPMI_HANDLER is not set 766# CONFIG_IPMI_HANDLER is not set
726# CONFIG_HW_RANDOM is not set 767# CONFIG_HW_RANDOM is not set
727# CONFIG_R3964 is not set 768# CONFIG_R3964 is not set
@@ -729,6 +770,7 @@ CONFIG_UNIX98_PTYS=y
729# CONFIG_TCG_TPM is not set 770# CONFIG_TCG_TPM is not set
730CONFIG_I2C=m 771CONFIG_I2C=m
731CONFIG_I2C_BOARDINFO=y 772CONFIG_I2C_BOARDINFO=y
773CONFIG_I2C_COMPAT=y
732CONFIG_I2C_CHARDEV=m 774CONFIG_I2C_CHARDEV=m
733CONFIG_I2C_HELPER_AUTO=y 775CONFIG_I2C_HELPER_AUTO=y
734 776
@@ -759,14 +801,6 @@ CONFIG_I2C_HELPER_AUTO=y
759# Miscellaneous I2C Chip support 801# Miscellaneous I2C Chip support
760# 802#
761# CONFIG_DS1682 is not set 803# CONFIG_DS1682 is not set
762# CONFIG_EEPROM_AT24 is not set
763# CONFIG_SENSORS_AD5252 is not set
764# CONFIG_EEPROM_LEGACY is not set
765# CONFIG_SENSORS_PCF8574 is not set
766# CONFIG_PCF8575 is not set
767# CONFIG_SENSORS_PCA9539 is not set
768# CONFIG_SENSORS_PCF8591 is not set
769# CONFIG_SENSORS_MAX6875 is not set
770# CONFIG_SENSORS_TSL2550 is not set 804# CONFIG_SENSORS_TSL2550 is not set
771# CONFIG_I2C_DEBUG_CORE is not set 805# CONFIG_I2C_DEBUG_CORE is not set
772# CONFIG_I2C_DEBUG_ALGO is not set 806# CONFIG_I2C_DEBUG_ALGO is not set
@@ -783,13 +817,18 @@ CONFIG_SPI_BFIN=y
783# CONFIG_SPI_BFIN_LOCK is not set 817# CONFIG_SPI_BFIN_LOCK is not set
784# CONFIG_SPI_BFIN_SPORT is not set 818# CONFIG_SPI_BFIN_SPORT is not set
785# CONFIG_SPI_BITBANG is not set 819# CONFIG_SPI_BITBANG is not set
820# CONFIG_SPI_GPIO is not set
786 821
787# 822#
788# SPI Protocol Masters 823# SPI Protocol Masters
789# 824#
790# CONFIG_EEPROM_AT25 is not set
791# CONFIG_SPI_SPIDEV is not set 825# CONFIG_SPI_SPIDEV is not set
792# CONFIG_SPI_TLE62X0 is not set 826# CONFIG_SPI_TLE62X0 is not set
827
828#
829# PPS support
830#
831# CONFIG_PPS is not set
793CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 832CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
794CONFIG_GPIOLIB=y 833CONFIG_GPIOLIB=y
795# CONFIG_DEBUG_GPIO is not set 834# CONFIG_DEBUG_GPIO is not set
@@ -805,6 +844,7 @@ CONFIG_GPIO_SYSFS=y
805# CONFIG_GPIO_MAX732X is not set 844# CONFIG_GPIO_MAX732X is not set
806# CONFIG_GPIO_PCA953X is not set 845# CONFIG_GPIO_PCA953X is not set
807# CONFIG_GPIO_PCF857X is not set 846# CONFIG_GPIO_PCF857X is not set
847# CONFIG_GPIO_ADP5588 is not set
808 848
809# 849#
810# PCI GPIO expanders: 850# PCI GPIO expanders:
@@ -815,11 +855,15 @@ CONFIG_GPIO_SYSFS=y
815# 855#
816# CONFIG_GPIO_MAX7301 is not set 856# CONFIG_GPIO_MAX7301 is not set
817# CONFIG_GPIO_MCP23S08 is not set 857# CONFIG_GPIO_MCP23S08 is not set
858# CONFIG_GPIO_MC33880 is not set
859
860#
861# AC97 GPIO expanders:
862#
818# CONFIG_W1 is not set 863# CONFIG_W1 is not set
819# CONFIG_POWER_SUPPLY is not set 864# CONFIG_POWER_SUPPLY is not set
820# CONFIG_HWMON is not set 865# CONFIG_HWMON is not set
821# CONFIG_THERMAL is not set 866# CONFIG_THERMAL is not set
822# CONFIG_THERMAL_HWMON is not set
823CONFIG_WATCHDOG=y 867CONFIG_WATCHDOG=y
824# CONFIG_WATCHDOG_NOWAYOUT is not set 868# CONFIG_WATCHDOG_NOWAYOUT is not set
825 869
@@ -841,26 +885,18 @@ CONFIG_SSB_POSSIBLE=y
841# CONFIG_MFD_CORE is not set 885# CONFIG_MFD_CORE is not set
842# CONFIG_MFD_SM501 is not set 886# CONFIG_MFD_SM501 is not set
843# CONFIG_HTC_PASIC3 is not set 887# CONFIG_HTC_PASIC3 is not set
888# CONFIG_UCB1400_CORE is not set
889# CONFIG_TPS65010 is not set
844# CONFIG_MFD_TMIO is not set 890# CONFIG_MFD_TMIO is not set
845# CONFIG_MFD_WM8400 is not set 891# CONFIG_MFD_WM8400 is not set
892# CONFIG_MFD_WM831X is not set
846# CONFIG_MFD_WM8350_I2C is not set 893# CONFIG_MFD_WM8350_I2C is not set
894# CONFIG_MFD_PCF50633 is not set
895# CONFIG_MFD_MC13783 is not set
896# CONFIG_AB3100_CORE is not set
897# CONFIG_EZX_PCAP is not set
847# CONFIG_REGULATOR is not set 898# CONFIG_REGULATOR is not set
848 899# CONFIG_MEDIA_SUPPORT is not set
849#
850# Multimedia devices
851#
852
853#
854# Multimedia core support
855#
856# CONFIG_VIDEO_DEV is not set
857# CONFIG_DVB_CORE is not set
858# CONFIG_VIDEO_MEDIA is not set
859
860#
861# Multimedia drivers
862#
863# CONFIG_DAB is not set
864 900
865# 901#
866# Graphics support 902# Graphics support
@@ -904,6 +940,7 @@ CONFIG_ADV7393_1XMEM=y
904# CONFIG_FB_VIRTUAL is not set 940# CONFIG_FB_VIRTUAL is not set
905# CONFIG_FB_METRONOME is not set 941# CONFIG_FB_METRONOME is not set
906# CONFIG_FB_MB862XX is not set 942# CONFIG_FB_MB862XX is not set
943# CONFIG_FB_BROADSHEET is not set
907# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 944# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
908 945
909# 946#
@@ -913,19 +950,27 @@ CONFIG_ADV7393_1XMEM=y
913# CONFIG_LOGO is not set 950# CONFIG_LOGO is not set
914CONFIG_SOUND=m 951CONFIG_SOUND=m
915CONFIG_SOUND_OSS_CORE=y 952CONFIG_SOUND_OSS_CORE=y
953CONFIG_SOUND_OSS_CORE_PRECLAIM=y
916CONFIG_SND=m 954CONFIG_SND=m
917CONFIG_SND_TIMER=m 955CONFIG_SND_TIMER=m
918CONFIG_SND_PCM=m 956CONFIG_SND_PCM=m
957CONFIG_SND_JACK=y
919# CONFIG_SND_SEQUENCER is not set 958# CONFIG_SND_SEQUENCER is not set
920CONFIG_SND_OSSEMUL=y 959CONFIG_SND_OSSEMUL=y
921CONFIG_SND_MIXER_OSS=m 960CONFIG_SND_MIXER_OSS=m
922CONFIG_SND_PCM_OSS=m 961CONFIG_SND_PCM_OSS=m
923CONFIG_SND_PCM_OSS_PLUGINS=y 962CONFIG_SND_PCM_OSS_PLUGINS=y
963# CONFIG_SND_HRTIMER is not set
924# CONFIG_SND_DYNAMIC_MINORS is not set 964# CONFIG_SND_DYNAMIC_MINORS is not set
925CONFIG_SND_SUPPORT_OLD_API=y 965CONFIG_SND_SUPPORT_OLD_API=y
926CONFIG_SND_VERBOSE_PROCFS=y 966CONFIG_SND_VERBOSE_PROCFS=y
927# CONFIG_SND_VERBOSE_PRINTK is not set 967# CONFIG_SND_VERBOSE_PRINTK is not set
928# CONFIG_SND_DEBUG is not set 968# CONFIG_SND_DEBUG is not set
969# CONFIG_SND_RAWMIDI_SEQ is not set
970# CONFIG_SND_OPL3_LIB_SEQ is not set
971# CONFIG_SND_OPL4_LIB_SEQ is not set
972# CONFIG_SND_SBAWE_SEQ is not set
973# CONFIG_SND_EMU10K1_SEQ is not set
929CONFIG_SND_DRIVERS=y 974CONFIG_SND_DRIVERS=y
930# CONFIG_SND_DUMMY is not set 975# CONFIG_SND_DUMMY is not set
931# CONFIG_SND_MTPAV is not set 976# CONFIG_SND_MTPAV is not set
@@ -936,13 +981,6 @@ CONFIG_SND_SPI=y
936# 981#
937# ALSA Blackfin devices 982# ALSA Blackfin devices
938# 983#
939CONFIG_SND_BLACKFIN_AD1836=m
940CONFIG_SND_BLACKFIN_AD1836_TDM=y
941# CONFIG_SND_BLACKFIN_AD1836_I2S is not set
942CONFIG_SND_BLACKFIN_AD1836_MULSUB=y
943# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
944CONFIG_SND_BLACKFIN_SPORT=0
945CONFIG_SND_BLACKFIN_SPI_PFBIT=4
946CONFIG_SND_BFIN_SPORT=0 984CONFIG_SND_BFIN_SPORT=0
947CONFIG_SND_BFIN_AD73322=m 985CONFIG_SND_BFIN_AD73322=m
948CONFIG_SND_BFIN_AD73322_SPORT0_SE=10 986CONFIG_SND_BFIN_AD73322_SPORT0_SE=10
@@ -953,16 +991,20 @@ CONFIG_SND_SOC_AC97_BUS=y
953CONFIG_SND_BF5XX_I2S=m 991CONFIG_SND_BF5XX_I2S=m
954# CONFIG_SND_BF5XX_SOC_SSM2602 is not set 992# CONFIG_SND_BF5XX_SOC_SSM2602 is not set
955CONFIG_SND_BF5XX_SOC_AD73311=m 993CONFIG_SND_BF5XX_SOC_AD73311=m
994# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set
995# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set
956CONFIG_SND_BFIN_AD73311_SE=4 996CONFIG_SND_BFIN_AD73311_SE=4
997# CONFIG_SND_BF5XX_TDM is not set
957CONFIG_SND_BF5XX_AC97=m 998CONFIG_SND_BF5XX_AC97=m
958CONFIG_SND_BF5XX_MMAP_SUPPORT=y 999CONFIG_SND_BF5XX_MMAP_SUPPORT=y
959# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set 1000# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
1001# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
1002CONFIG_SND_BF5XX_SOC_AD1980=m
960CONFIG_SND_BF5XX_SOC_SPORT=m 1003CONFIG_SND_BF5XX_SOC_SPORT=m
961CONFIG_SND_BF5XX_SOC_I2S=m 1004CONFIG_SND_BF5XX_SOC_I2S=m
962CONFIG_SND_BF5XX_SOC_AC97=m 1005CONFIG_SND_BF5XX_SOC_AC97=m
963CONFIG_SND_BF5XX_SOC_AD1980=m
964CONFIG_SND_BF5XX_SPORT_NUM=0 1006CONFIG_SND_BF5XX_SPORT_NUM=0
965# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set 1007CONFIG_SND_SOC_I2C_AND_SPI=m
966# CONFIG_SND_SOC_ALL_CODECS is not set 1008# CONFIG_SND_SOC_ALL_CODECS is not set
967CONFIG_SND_SOC_AD1980=m 1009CONFIG_SND_SOC_AD1980=m
968CONFIG_SND_SOC_AD73311=m 1010CONFIG_SND_SOC_AD73311=m
@@ -970,14 +1012,12 @@ CONFIG_SND_SOC_AD73311=m
970CONFIG_AC97_BUS=m 1012CONFIG_AC97_BUS=m
971CONFIG_HID_SUPPORT=y 1013CONFIG_HID_SUPPORT=y
972CONFIG_HID=y 1014CONFIG_HID=y
973# CONFIG_HID_DEBUG is not set
974# CONFIG_HIDRAW is not set 1015# CONFIG_HIDRAW is not set
975# CONFIG_HID_PID is not set 1016# CONFIG_HID_PID is not set
976 1017
977# 1018#
978# Special HID drivers 1019# Special HID drivers
979# 1020#
980CONFIG_HID_COMPAT=y
981# CONFIG_USB_SUPPORT is not set 1021# CONFIG_USB_SUPPORT is not set
982# CONFIG_MMC is not set 1022# CONFIG_MMC is not set
983# CONFIG_MEMSTICK is not set 1023# CONFIG_MEMSTICK is not set
@@ -1014,6 +1054,7 @@ CONFIG_RTC_INTF_DEV=y
1014# CONFIG_RTC_DRV_S35390A is not set 1054# CONFIG_RTC_DRV_S35390A is not set
1015# CONFIG_RTC_DRV_FM3130 is not set 1055# CONFIG_RTC_DRV_FM3130 is not set
1016# CONFIG_RTC_DRV_RX8581 is not set 1056# CONFIG_RTC_DRV_RX8581 is not set
1057# CONFIG_RTC_DRV_RX8025 is not set
1017 1058
1018# 1059#
1019# SPI RTC drivers 1060# SPI RTC drivers
@@ -1025,6 +1066,7 @@ CONFIG_RTC_INTF_DEV=y
1025# CONFIG_RTC_DRV_R9701 is not set 1066# CONFIG_RTC_DRV_R9701 is not set
1026# CONFIG_RTC_DRV_RS5C348 is not set 1067# CONFIG_RTC_DRV_RS5C348 is not set
1027# CONFIG_RTC_DRV_DS3234 is not set 1068# CONFIG_RTC_DRV_DS3234 is not set
1069# CONFIG_RTC_DRV_PCF2123 is not set
1028 1070
1029# 1071#
1030# Platform RTC drivers 1072# Platform RTC drivers
@@ -1045,10 +1087,21 @@ CONFIG_RTC_INTF_DEV=y
1045# 1087#
1046CONFIG_RTC_DRV_BFIN=y 1088CONFIG_RTC_DRV_BFIN=y
1047# CONFIG_DMADEVICES is not set 1089# CONFIG_DMADEVICES is not set
1090# CONFIG_AUXDISPLAY is not set
1048# CONFIG_UIO is not set 1091# CONFIG_UIO is not set
1092
1093#
1094# TI VLYNQ
1095#
1049# CONFIG_STAGING is not set 1096# CONFIG_STAGING is not set
1050 1097
1051# 1098#
1099# Firmware Drivers
1100#
1101# CONFIG_FIRMWARE_MEMMAP is not set
1102# CONFIG_SIGMA is not set
1103
1104#
1052# File systems 1105# File systems
1053# 1106#
1054# CONFIG_EXT2_FS is not set 1107# CONFIG_EXT2_FS is not set
@@ -1057,9 +1110,13 @@ CONFIG_RTC_DRV_BFIN=y
1057# CONFIG_REISERFS_FS is not set 1110# CONFIG_REISERFS_FS is not set
1058# CONFIG_JFS_FS is not set 1111# CONFIG_JFS_FS is not set
1059# CONFIG_FS_POSIX_ACL is not set 1112# CONFIG_FS_POSIX_ACL is not set
1060CONFIG_FILE_LOCKING=y
1061# CONFIG_XFS_FS is not set 1113# CONFIG_XFS_FS is not set
1114# CONFIG_GFS2_FS is not set
1062# CONFIG_OCFS2_FS is not set 1115# CONFIG_OCFS2_FS is not set
1116# CONFIG_BTRFS_FS is not set
1117# CONFIG_NILFS2_FS is not set
1118CONFIG_FILE_LOCKING=y
1119CONFIG_FSNOTIFY=y
1063# CONFIG_DNOTIFY is not set 1120# CONFIG_DNOTIFY is not set
1064CONFIG_INOTIFY=y 1121CONFIG_INOTIFY=y
1065CONFIG_INOTIFY_USER=y 1122CONFIG_INOTIFY_USER=y
@@ -1069,6 +1126,11 @@ CONFIG_INOTIFY_USER=y
1069# CONFIG_FUSE_FS is not set 1126# CONFIG_FUSE_FS is not set
1070 1127
1071# 1128#
1129# Caches
1130#
1131# CONFIG_FSCACHE is not set
1132
1133#
1072# CD-ROM/DVD Filesystems 1134# CD-ROM/DVD Filesystems
1073# 1135#
1074# CONFIG_ISO9660_FS is not set 1136# CONFIG_ISO9660_FS is not set
@@ -1087,13 +1149,9 @@ CONFIG_INOTIFY_USER=y
1087CONFIG_PROC_FS=y 1149CONFIG_PROC_FS=y
1088CONFIG_PROC_SYSCTL=y 1150CONFIG_PROC_SYSCTL=y
1089CONFIG_SYSFS=y 1151CONFIG_SYSFS=y
1090# CONFIG_TMPFS is not set
1091# CONFIG_HUGETLB_PAGE is not set 1152# CONFIG_HUGETLB_PAGE is not set
1092# CONFIG_CONFIGFS_FS is not set 1153# CONFIG_CONFIGFS_FS is not set
1093 1154CONFIG_MISC_FILESYSTEMS=y
1094#
1095# Miscellaneous filesystems
1096#
1097# CONFIG_ADFS_FS is not set 1155# CONFIG_ADFS_FS is not set
1098# CONFIG_AFFS_FS is not set 1156# CONFIG_AFFS_FS is not set
1099# CONFIG_HFS_FS is not set 1157# CONFIG_HFS_FS is not set
@@ -1112,17 +1170,8 @@ CONFIG_JFFS2_ZLIB=y
1112# CONFIG_JFFS2_LZO is not set 1170# CONFIG_JFFS2_LZO is not set
1113CONFIG_JFFS2_RTIME=y 1171CONFIG_JFFS2_RTIME=y
1114# CONFIG_JFFS2_RUBIN is not set 1172# CONFIG_JFFS2_RUBIN is not set
1115CONFIG_YAFFS_FS=m
1116CONFIG_YAFFS_YAFFS1=y
1117# CONFIG_YAFFS_9BYTE_TAGS is not set
1118# CONFIG_YAFFS_DOES_ECC is not set
1119CONFIG_YAFFS_YAFFS2=y
1120CONFIG_YAFFS_AUTO_YAFFS2=y
1121# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1122# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1123# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1124CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1125# CONFIG_CRAMFS is not set 1173# CONFIG_CRAMFS is not set
1174# CONFIG_SQUASHFS is not set
1126# CONFIG_VXFS_FS is not set 1175# CONFIG_VXFS_FS is not set
1127# CONFIG_MINIX_FS is not set 1176# CONFIG_MINIX_FS is not set
1128# CONFIG_OMFS_FS is not set 1177# CONFIG_OMFS_FS is not set
@@ -1141,7 +1190,6 @@ CONFIG_LOCKD=m
1141CONFIG_LOCKD_V4=y 1190CONFIG_LOCKD_V4=y
1142CONFIG_NFS_COMMON=y 1191CONFIG_NFS_COMMON=y
1143CONFIG_SUNRPC=m 1192CONFIG_SUNRPC=m
1144# CONFIG_SUNRPC_REGISTER_V4 is not set
1145# CONFIG_RPCSEC_GSS_KRB5 is not set 1193# CONFIG_RPCSEC_GSS_KRB5 is not set
1146# CONFIG_RPCSEC_GSS_SPKM3 is not set 1194# CONFIG_RPCSEC_GSS_SPKM3 is not set
1147CONFIG_SMB_FS=m 1195CONFIG_SMB_FS=m
@@ -1206,14 +1254,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1206CONFIG_ENABLE_MUST_CHECK=y 1254CONFIG_ENABLE_MUST_CHECK=y
1207CONFIG_FRAME_WARN=1024 1255CONFIG_FRAME_WARN=1024
1208# CONFIG_MAGIC_SYSRQ is not set 1256# CONFIG_MAGIC_SYSRQ is not set
1257# CONFIG_STRIP_ASM_SYMS is not set
1209# CONFIG_UNUSED_SYMBOLS is not set 1258# CONFIG_UNUSED_SYMBOLS is not set
1210CONFIG_DEBUG_FS=y 1259CONFIG_DEBUG_FS=y
1211# CONFIG_HEADERS_CHECK is not set 1260# CONFIG_HEADERS_CHECK is not set
1261CONFIG_DEBUG_SECTION_MISMATCH=y
1212CONFIG_DEBUG_KERNEL=y 1262CONFIG_DEBUG_KERNEL=y
1213CONFIG_DEBUG_SHIRQ=y 1263CONFIG_DEBUG_SHIRQ=y
1214CONFIG_DETECT_SOFTLOCKUP=y 1264CONFIG_DETECT_SOFTLOCKUP=y
1215# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1265# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1216CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1266CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1267CONFIG_DETECT_HUNG_TASK=y
1268# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1269CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1217CONFIG_SCHED_DEBUG=y 1270CONFIG_SCHED_DEBUG=y
1218# CONFIG_SCHEDSTATS is not set 1271# CONFIG_SCHEDSTATS is not set
1219# CONFIG_TIMER_STATS is not set 1272# CONFIG_TIMER_STATS is not set
@@ -1221,31 +1274,39 @@ CONFIG_SCHED_DEBUG=y
1221# CONFIG_DEBUG_SLAB is not set 1274# CONFIG_DEBUG_SLAB is not set
1222# CONFIG_DEBUG_SPINLOCK is not set 1275# CONFIG_DEBUG_SPINLOCK is not set
1223# CONFIG_DEBUG_MUTEXES is not set 1276# CONFIG_DEBUG_MUTEXES is not set
1277# CONFIG_DEBUG_LOCK_ALLOC is not set
1278# CONFIG_PROVE_LOCKING is not set
1279# CONFIG_LOCK_STAT is not set
1224# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1280# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1225# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1281# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1226# CONFIG_DEBUG_KOBJECT is not set 1282# CONFIG_DEBUG_KOBJECT is not set
1227CONFIG_DEBUG_BUGVERBOSE=y 1283CONFIG_DEBUG_BUGVERBOSE=y
1228CONFIG_DEBUG_INFO=y 1284CONFIG_DEBUG_INFO=y
1229# CONFIG_DEBUG_VM is not set 1285# CONFIG_DEBUG_VM is not set
1286# CONFIG_DEBUG_NOMMU_REGIONS is not set
1230# CONFIG_DEBUG_WRITECOUNT is not set 1287# CONFIG_DEBUG_WRITECOUNT is not set
1231# CONFIG_DEBUG_MEMORY_INIT is not set 1288# CONFIG_DEBUG_MEMORY_INIT is not set
1232# CONFIG_DEBUG_LIST is not set 1289# CONFIG_DEBUG_LIST is not set
1233# CONFIG_DEBUG_SG is not set 1290# CONFIG_DEBUG_SG is not set
1291# CONFIG_DEBUG_NOTIFIERS is not set
1292# CONFIG_DEBUG_CREDENTIALS is not set
1234# CONFIG_FRAME_POINTER is not set 1293# CONFIG_FRAME_POINTER is not set
1235# CONFIG_BOOT_PRINTK_DELAY is not set 1294# CONFIG_BOOT_PRINTK_DELAY is not set
1236# CONFIG_RCU_TORTURE_TEST is not set 1295# CONFIG_RCU_TORTURE_TEST is not set
1237# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1296# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1238# CONFIG_BACKTRACE_SELF_TEST is not set 1297# CONFIG_BACKTRACE_SELF_TEST is not set
1239# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1298# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1299# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1240# CONFIG_FAULT_INJECTION is not set 1300# CONFIG_FAULT_INJECTION is not set
1241 1301# CONFIG_PAGE_POISONING is not set
1242# 1302CONFIG_HAVE_FUNCTION_TRACER=y
1243# Tracers 1303CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1244# 1304CONFIG_TRACING_SUPPORT=y
1245# CONFIG_SCHED_TRACER is not set 1305# CONFIG_FTRACE is not set
1246# CONFIG_CONTEXT_SWITCH_TRACER is not set 1306# CONFIG_BRANCH_PROFILE_NONE is not set
1247# CONFIG_BOOT_TRACER is not set 1307# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1248# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1308# CONFIG_PROFILE_ALL_BRANCHES is not set
1309# CONFIG_DYNAMIC_DEBUG is not set
1249# CONFIG_SAMPLES is not set 1310# CONFIG_SAMPLES is not set
1250CONFIG_HAVE_ARCH_KGDB=y 1311CONFIG_HAVE_ARCH_KGDB=y
1251# CONFIG_KGDB is not set 1312# CONFIG_KGDB is not set
@@ -1270,6 +1331,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1270CONFIG_EARLY_PRINTK=y 1331CONFIG_EARLY_PRINTK=y
1271CONFIG_CPLB_INFO=y 1332CONFIG_CPLB_INFO=y
1272CONFIG_ACCESS_CHECK=y 1333CONFIG_ACCESS_CHECK=y
1334# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1273 1335
1274# 1336#
1275# Security options 1337# Security options
@@ -1278,14 +1340,14 @@ CONFIG_ACCESS_CHECK=y
1278CONFIG_SECURITY=y 1340CONFIG_SECURITY=y
1279# CONFIG_SECURITYFS is not set 1341# CONFIG_SECURITYFS is not set
1280# CONFIG_SECURITY_NETWORK is not set 1342# CONFIG_SECURITY_NETWORK is not set
1343# CONFIG_SECURITY_PATH is not set
1281# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1344# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1282CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1345# CONFIG_SECURITY_TOMOYO is not set
1283CONFIG_CRYPTO=y 1346CONFIG_CRYPTO=y
1284 1347
1285# 1348#
1286# Crypto core or helper 1349# Crypto core or helper
1287# 1350#
1288# CONFIG_CRYPTO_FIPS is not set
1289# CONFIG_CRYPTO_MANAGER is not set 1351# CONFIG_CRYPTO_MANAGER is not set
1290# CONFIG_CRYPTO_MANAGER2 is not set 1352# CONFIG_CRYPTO_MANAGER2 is not set
1291# CONFIG_CRYPTO_GF128MUL is not set 1353# CONFIG_CRYPTO_GF128MUL is not set
@@ -1317,11 +1379,13 @@ CONFIG_CRYPTO=y
1317# 1379#
1318# CONFIG_CRYPTO_HMAC is not set 1380# CONFIG_CRYPTO_HMAC is not set
1319# CONFIG_CRYPTO_XCBC is not set 1381# CONFIG_CRYPTO_XCBC is not set
1382# CONFIG_CRYPTO_VMAC is not set
1320 1383
1321# 1384#
1322# Digest 1385# Digest
1323# 1386#
1324# CONFIG_CRYPTO_CRC32C is not set 1387# CONFIG_CRYPTO_CRC32C is not set
1388# CONFIG_CRYPTO_GHASH is not set
1325# CONFIG_CRYPTO_MD4 is not set 1389# CONFIG_CRYPTO_MD4 is not set
1326# CONFIG_CRYPTO_MD5 is not set 1390# CONFIG_CRYPTO_MD5 is not set
1327# CONFIG_CRYPTO_MICHAEL_MIC is not set 1391# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1358,6 +1422,7 @@ CONFIG_CRYPTO=y
1358# Compression 1422# Compression
1359# 1423#
1360# CONFIG_CRYPTO_DEFLATE is not set 1424# CONFIG_CRYPTO_DEFLATE is not set
1425# CONFIG_CRYPTO_ZLIB is not set
1361# CONFIG_CRYPTO_LZO is not set 1426# CONFIG_CRYPTO_LZO is not set
1362 1427
1363# 1428#
@@ -1365,11 +1430,13 @@ CONFIG_CRYPTO=y
1365# 1430#
1366# CONFIG_CRYPTO_ANSI_CPRNG is not set 1431# CONFIG_CRYPTO_ANSI_CPRNG is not set
1367CONFIG_CRYPTO_HW=y 1432CONFIG_CRYPTO_HW=y
1433# CONFIG_BINARY_PRINTF is not set
1368 1434
1369# 1435#
1370# Library routines 1436# Library routines
1371# 1437#
1372CONFIG_BITREVERSE=y 1438CONFIG_BITREVERSE=y
1439CONFIG_GENERIC_FIND_LAST_BIT=y
1373CONFIG_CRC_CCITT=m 1440CONFIG_CRC_CCITT=m
1374# CONFIG_CRC16 is not set 1441# CONFIG_CRC16 is not set
1375# CONFIG_CRC_T10DIF is not set 1442# CONFIG_CRC_T10DIF is not set
@@ -1379,6 +1446,8 @@ CONFIG_CRC32=y
1379# CONFIG_LIBCRC32C is not set 1446# CONFIG_LIBCRC32C is not set
1380CONFIG_ZLIB_INFLATE=y 1447CONFIG_ZLIB_INFLATE=y
1381CONFIG_ZLIB_DEFLATE=m 1448CONFIG_ZLIB_DEFLATE=m
1449CONFIG_DECOMPRESS_GZIP=y
1382CONFIG_HAS_IOMEM=y 1450CONFIG_HAS_IOMEM=y
1383CONFIG_HAS_IOPORT=y 1451CONFIG_HAS_IOPORT=y
1384CONFIG_HAS_DMA=y 1452CONFIG_HAS_DMA=y
1453CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 2908595b67c5..7596cf7673f1 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102# CONFIG_DEFAULT_NOOP is not set 131# CONFIG_DEFAULT_NOOP is not set
103CONFIG_DEFAULT_IOSCHED="anticipatory" 132CONFIG_DEFAULT_IOSCHED="anticipatory"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_FREEZER=y
132CONFIG_BF537=y 160CONFIG_BF537=y
133# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=2 174CONFIG_BF_REV_MIN=2
@@ -184,7 +212,8 @@ CONFIG_IRQ_MEM_DMA1=13
184CONFIG_IRQ_WATCH=13 212CONFIG_IRQ_WATCH=13
185CONFIG_IRQ_SPI=10 213CONFIG_IRQ_SPI=10
186CONFIG_BFIN537_STAMP=y 214CONFIG_BFIN537_STAMP=y
187# CONFIG_BFIN537_BLUETECHNIX_CM is not set 215# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set
216# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set
188# CONFIG_BFIN537_BLUETECHNIX_TCM is not set 217# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
189# CONFIG_PNAV10 is not set 218# CONFIG_PNAV10 is not set
190# CONFIG_CAMSIG_MINOTAUR is not set 219# CONFIG_CAMSIG_MINOTAUR is not set
@@ -235,7 +264,7 @@ CONFIG_GENERIC_TIME=y
235CONFIG_GENERIC_CLOCKEVENTS=y 264CONFIG_GENERIC_CLOCKEVENTS=y
236# CONFIG_TICKSOURCE_GPTMR0 is not set 265# CONFIG_TICKSOURCE_GPTMR0 is not set
237CONFIG_TICKSOURCE_CORETMR=y 266CONFIG_TICKSOURCE_CORETMR=y
238# CONFIG_CYCLES_CLOCKSOURCE is not set 267CONFIG_CYCLES_CLOCKSOURCE=y
239# CONFIG_GPTMR0_CLOCKSOURCE is not set 268# CONFIG_GPTMR0_CLOCKSOURCE is not set
240CONFIG_TICK_ONESHOT=y 269CONFIG_TICK_ONESHOT=y
241# CONFIG_NO_HZ is not set 270# CONFIG_NO_HZ is not set
@@ -287,7 +316,6 @@ CONFIG_FLATMEM=y
287CONFIG_FLAT_NODE_MEM_MAP=y 316CONFIG_FLAT_NODE_MEM_MAP=y
288CONFIG_PAGEFLAGS_EXTENDED=y 317CONFIG_PAGEFLAGS_EXTENDED=y
289CONFIG_SPLIT_PTLOCK_CPUS=4 318CONFIG_SPLIT_PTLOCK_CPUS=4
290# CONFIG_RESOURCES_64BIT is not set
291# CONFIG_PHYS_ADDR_T_64BIT is not set 319# CONFIG_PHYS_ADDR_T_64BIT is not set
292CONFIG_ZONE_DMA_FLAG=1 320CONFIG_ZONE_DMA_FLAG=1
293CONFIG_VIRT_TO_BUS=y 321CONFIG_VIRT_TO_BUS=y
@@ -296,16 +324,18 @@ CONFIG_BFIN_GPTIMERS=m
296# CONFIG_DMA_UNCACHED_4M is not set 324# CONFIG_DMA_UNCACHED_4M is not set
297# CONFIG_DMA_UNCACHED_2M is not set 325# CONFIG_DMA_UNCACHED_2M is not set
298CONFIG_DMA_UNCACHED_1M=y 326CONFIG_DMA_UNCACHED_1M=y
327# CONFIG_DMA_UNCACHED_512K is not set
328# CONFIG_DMA_UNCACHED_256K is not set
329# CONFIG_DMA_UNCACHED_128K is not set
299# CONFIG_DMA_UNCACHED_NONE is not set 330# CONFIG_DMA_UNCACHED_NONE is not set
300 331
301# 332#
302# Cache Support 333# Cache Support
303# 334#
304CONFIG_BFIN_ICACHE=y 335CONFIG_BFIN_ICACHE=y
305# CONFIG_BFIN_ICACHE_LOCK is not set 336CONFIG_BFIN_EXTMEM_ICACHEABLE=y
306CONFIG_BFIN_DCACHE=y 337CONFIG_BFIN_DCACHE=y
307# CONFIG_BFIN_DCACHE_BANKA is not set 338# CONFIG_BFIN_DCACHE_BANKA is not set
308CONFIG_BFIN_EXTMEM_ICACHEABLE=y
309CONFIG_BFIN_EXTMEM_DCACHEABLE=y 339CONFIG_BFIN_EXTMEM_DCACHEABLE=y
310CONFIG_BFIN_EXTMEM_WRITEBACK=y 340CONFIG_BFIN_EXTMEM_WRITEBACK=y
311# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 341# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -316,7 +346,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
316# CONFIG_MPU is not set 346# CONFIG_MPU is not set
317 347
318# 348#
319# Asynchonous Memory Configuration 349# Asynchronous Memory Configuration
320# 350#
321 351
322# 352#
@@ -362,6 +392,7 @@ CONFIG_PM=y
362CONFIG_PM_SLEEP=y 392CONFIG_PM_SLEEP=y
363CONFIG_SUSPEND=y 393CONFIG_SUSPEND=y
364CONFIG_SUSPEND_FREEZER=y 394CONFIG_SUSPEND_FREEZER=y
395# CONFIG_PM_RUNTIME is not set
365CONFIG_ARCH_SUSPEND_POSSIBLE=y 396CONFIG_ARCH_SUSPEND_POSSIBLE=y
366CONFIG_PM_BFIN_SLEEP_DEEPER=y 397CONFIG_PM_BFIN_SLEEP_DEEPER=y
367# CONFIG_PM_BFIN_SLEEP is not set 398# CONFIG_PM_BFIN_SLEEP is not set
@@ -384,11 +415,6 @@ CONFIG_NET=y
384CONFIG_PACKET=y 415CONFIG_PACKET=y
385# CONFIG_PACKET_MMAP is not set 416# CONFIG_PACKET_MMAP is not set
386CONFIG_UNIX=y 417CONFIG_UNIX=y
387CONFIG_XFRM=y
388# CONFIG_XFRM_USER is not set
389# CONFIG_XFRM_SUB_POLICY is not set
390# CONFIG_XFRM_MIGRATE is not set
391# CONFIG_XFRM_STATISTICS is not set
392# CONFIG_NET_KEY is not set 418# CONFIG_NET_KEY is not set
393CONFIG_INET=y 419CONFIG_INET=y
394# CONFIG_IP_MULTICAST is not set 420# CONFIG_IP_MULTICAST is not set
@@ -412,7 +438,6 @@ CONFIG_IP_PNP=y
412# CONFIG_INET_XFRM_MODE_BEET is not set 438# CONFIG_INET_XFRM_MODE_BEET is not set
413# CONFIG_INET_LRO is not set 439# CONFIG_INET_LRO is not set
414# CONFIG_INET_DIAG is not set 440# CONFIG_INET_DIAG is not set
415CONFIG_INET_TCP_DIAG=y
416# CONFIG_TCP_CONG_ADVANCED is not set 441# CONFIG_TCP_CONG_ADVANCED is not set
417CONFIG_TCP_CONG_CUBIC=y 442CONFIG_TCP_CONG_CUBIC=y
418CONFIG_DEFAULT_TCP_CONG="cubic" 443CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -423,6 +448,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
423# CONFIG_NETFILTER is not set 448# CONFIG_NETFILTER is not set
424# CONFIG_IP_DCCP is not set 449# CONFIG_IP_DCCP is not set
425# CONFIG_IP_SCTP is not set 450# CONFIG_IP_SCTP is not set
451# CONFIG_RDS is not set
426# CONFIG_TIPC is not set 452# CONFIG_TIPC is not set
427# CONFIG_ATM is not set 453# CONFIG_ATM is not set
428# CONFIG_BRIDGE is not set 454# CONFIG_BRIDGE is not set
@@ -436,14 +462,34 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
436# CONFIG_LAPB is not set 462# CONFIG_LAPB is not set
437# CONFIG_ECONET is not set 463# CONFIG_ECONET is not set
438# CONFIG_WAN_ROUTER is not set 464# CONFIG_WAN_ROUTER is not set
465# CONFIG_PHONET is not set
466# CONFIG_IEEE802154 is not set
439# CONFIG_NET_SCHED is not set 467# CONFIG_NET_SCHED is not set
468# CONFIG_DCB is not set
440 469
441# 470#
442# Network testing 471# Network testing
443# 472#
444# CONFIG_NET_PKTGEN is not set 473# CONFIG_NET_PKTGEN is not set
445# CONFIG_HAMRADIO is not set 474# CONFIG_HAMRADIO is not set
446# CONFIG_CAN is not set 475CONFIG_CAN=m
476CONFIG_CAN_RAW=m
477CONFIG_CAN_BCM=m
478
479#
480# CAN Device Drivers
481#
482# CONFIG_CAN_VCAN is not set
483CONFIG_CAN_DEV=m
484# CONFIG_CAN_CALC_BITTIMING is not set
485CONFIG_CAN_BFIN=m
486# CONFIG_CAN_SJA1000 is not set
487
488#
489# CAN USB interfaces
490#
491# CONFIG_CAN_EMS_USB is not set
492# CONFIG_CAN_DEBUG_DEVICES is not set
447CONFIG_IRDA=m 493CONFIG_IRDA=m
448 494
449# 495#
@@ -483,13 +529,8 @@ CONFIG_SIR_BFIN_DMA=y
483# 529#
484# CONFIG_BT is not set 530# CONFIG_BT is not set
485# CONFIG_AF_RXRPC is not set 531# CONFIG_AF_RXRPC is not set
486# CONFIG_PHONET is not set 532# CONFIG_WIRELESS is not set
487CONFIG_WIRELESS=y 533# CONFIG_WIMAX is not set
488# CONFIG_CFG80211 is not set
489CONFIG_WIRELESS_OLD_REGULATORY=y
490# CONFIG_WIRELESS_EXT is not set
491# CONFIG_MAC80211 is not set
492# CONFIG_IEEE80211 is not set
493# CONFIG_RFKILL is not set 534# CONFIG_RFKILL is not set
494# CONFIG_NET_9P is not set 535# CONFIG_NET_9P is not set
495 536
@@ -510,6 +551,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
510# CONFIG_CONNECTOR is not set 551# CONFIG_CONNECTOR is not set
511CONFIG_MTD=y 552CONFIG_MTD=y
512# CONFIG_MTD_DEBUG is not set 553# CONFIG_MTD_DEBUG is not set
554# CONFIG_MTD_TESTS is not set
513# CONFIG_MTD_CONCAT is not set 555# CONFIG_MTD_CONCAT is not set
514CONFIG_MTD_PARTITIONS=y 556CONFIG_MTD_PARTITIONS=y
515# CONFIG_MTD_REDBOOT_PARTS is not set 557# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -568,6 +610,7 @@ CONFIG_MTD_PHYSMAP=m
568# 610#
569# CONFIG_MTD_DATAFLASH is not set 611# CONFIG_MTD_DATAFLASH is not set
570# CONFIG_MTD_M25P80 is not set 612# CONFIG_MTD_M25P80 is not set
613# CONFIG_MTD_SST25L is not set
571# CONFIG_MTD_SLRAM is not set 614# CONFIG_MTD_SLRAM is not set
572# CONFIG_MTD_PHRAM is not set 615# CONFIG_MTD_PHRAM is not set
573# CONFIG_MTD_MTDRAM is not set 616# CONFIG_MTD_MTDRAM is not set
@@ -583,6 +626,11 @@ CONFIG_MTD_PHYSMAP=m
583# CONFIG_MTD_ONENAND is not set 626# CONFIG_MTD_ONENAND is not set
584 627
585# 628#
629# LPDDR flash memory drivers
630#
631# CONFIG_MTD_LPDDR is not set
632
633#
586# UBI - Unsorted block images 634# UBI - Unsorted block images
587# 635#
588# CONFIG_MTD_UBI is not set 636# CONFIG_MTD_UBI is not set
@@ -599,10 +647,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
599# CONFIG_ATA_OVER_ETH is not set 647# CONFIG_ATA_OVER_ETH is not set
600# CONFIG_BLK_DEV_HD is not set 648# CONFIG_BLK_DEV_HD is not set
601CONFIG_MISC_DEVICES=y 649CONFIG_MISC_DEVICES=y
602# CONFIG_EEPROM_93CX6 is not set 650# CONFIG_AD525X_DPOT is not set
603# CONFIG_ICS932S401 is not set 651# CONFIG_ICS932S401 is not set
604# CONFIG_ENCLOSURE_SERVICES is not set 652# CONFIG_ENCLOSURE_SERVICES is not set
653# CONFIG_ISL29003 is not set
605# CONFIG_C2PORT is not set 654# CONFIG_C2PORT is not set
655
656#
657# EEPROM support
658#
659# CONFIG_EEPROM_AT24 is not set
660# CONFIG_EEPROM_AT25 is not set
661# CONFIG_EEPROM_LEGACY is not set
662# CONFIG_EEPROM_MAX6875 is not set
663# CONFIG_EEPROM_93CX6 is not set
606CONFIG_HAVE_IDE=y 664CONFIG_HAVE_IDE=y
607# CONFIG_IDE is not set 665# CONFIG_IDE is not set
608 666
@@ -637,6 +695,9 @@ CONFIG_SMSC_PHY=y
637# CONFIG_BROADCOM_PHY is not set 695# CONFIG_BROADCOM_PHY is not set
638# CONFIG_ICPLUS_PHY is not set 696# CONFIG_ICPLUS_PHY is not set
639# CONFIG_REALTEK_PHY is not set 697# CONFIG_REALTEK_PHY is not set
698# CONFIG_NATIONAL_PHY is not set
699# CONFIG_STE10XP is not set
700# CONFIG_LSI_ET1011C_PHY is not set
640# CONFIG_FIXED_PHY is not set 701# CONFIG_FIXED_PHY is not set
641# CONFIG_MDIO_BITBANG is not set 702# CONFIG_MDIO_BITBANG is not set
642CONFIG_NET_ETHERNET=y 703CONFIG_NET_ETHERNET=y
@@ -647,9 +708,12 @@ CONFIG_BFIN_TX_DESC_NUM=10
647CONFIG_BFIN_RX_DESC_NUM=20 708CONFIG_BFIN_RX_DESC_NUM=20
648# CONFIG_BFIN_MAC_RMII is not set 709# CONFIG_BFIN_MAC_RMII is not set
649# CONFIG_SMC91X is not set 710# CONFIG_SMC91X is not set
650# CONFIG_SMSC911X is not set
651# CONFIG_DM9000 is not set 711# CONFIG_DM9000 is not set
652# CONFIG_ENC28J60 is not set 712# CONFIG_ENC28J60 is not set
713# CONFIG_ETHOC is not set
714# CONFIG_SMSC911X is not set
715# CONFIG_DNET is not set
716# CONFIG_ADF702X is not set
653# CONFIG_IBM_NEW_EMAC_ZMII is not set 717# CONFIG_IBM_NEW_EMAC_ZMII is not set
654# CONFIG_IBM_NEW_EMAC_RGMII is not set 718# CONFIG_IBM_NEW_EMAC_RGMII is not set
655# CONFIG_IBM_NEW_EMAC_TAH is not set 719# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -658,15 +722,16 @@ CONFIG_BFIN_RX_DESC_NUM=20
658# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 722# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
659# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 723# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
660# CONFIG_B44 is not set 724# CONFIG_B44 is not set
725# CONFIG_KS8842 is not set
726# CONFIG_KS8851 is not set
727# CONFIG_KS8851_MLL is not set
661# CONFIG_NETDEV_1000 is not set 728# CONFIG_NETDEV_1000 is not set
662# CONFIG_NETDEV_10000 is not set 729# CONFIG_NETDEV_10000 is not set
730# CONFIG_WLAN is not set
663 731
664# 732#
665# Wireless LAN 733# Enable WiMAX (Networking options) to see the WiMAX drivers
666# 734#
667# CONFIG_WLAN_PRE80211 is not set
668# CONFIG_WLAN_80211 is not set
669# CONFIG_IWLWIFI_LEDS is not set
670# CONFIG_WAN is not set 735# CONFIG_WAN is not set
671# CONFIG_PPP is not set 736# CONFIG_PPP is not set
672# CONFIG_SLIP is not set 737# CONFIG_SLIP is not set
@@ -701,7 +766,10 @@ CONFIG_INPUT_EVDEV=m
701# CONFIG_INPUT_TOUCHSCREEN is not set 766# CONFIG_INPUT_TOUCHSCREEN is not set
702CONFIG_INPUT_MISC=y 767CONFIG_INPUT_MISC=y
703# CONFIG_INPUT_UINPUT is not set 768# CONFIG_INPUT_UINPUT is not set
704CONFIG_CONFIG_INPUT_PCF8574=m 769# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
770# CONFIG_INPUT_AD714X is not set
771# CONFIG_INPUT_ADXL34X is not set
772# CONFIG_INPUT_PCF8574 is not set
705 773
706# 774#
707# Hardware I/O ports 775# Hardware I/O ports
@@ -712,16 +780,13 @@ CONFIG_CONFIG_INPUT_PCF8574=m
712# 780#
713# Character devices 781# Character devices
714# 782#
715# CONFIG_AD9960 is not set
716CONFIG_BFIN_DMA_INTERFACE=m 783CONFIG_BFIN_DMA_INTERFACE=m
717# CONFIG_BFIN_PPI is not set 784# CONFIG_BFIN_PPI is not set
718# CONFIG_BFIN_PPIFCD is not set 785# CONFIG_BFIN_PPIFCD is not set
719# CONFIG_BFIN_SIMPLE_TIMER is not set 786# CONFIG_BFIN_SIMPLE_TIMER is not set
720# CONFIG_BFIN_SPI_ADC is not set 787# CONFIG_BFIN_SPI_ADC is not set
721CONFIG_BFIN_SPORT=m 788CONFIG_BFIN_SPORT=m
722# CONFIG_BFIN_TIMER_LATENCY is not set
723# CONFIG_BFIN_TWI_LCD is not set 789# CONFIG_BFIN_TWI_LCD is not set
724CONFIG_SIMPLE_GPIO=m
725# CONFIG_VT is not set 790# CONFIG_VT is not set
726# CONFIG_DEVKMEM is not set 791# CONFIG_DEVKMEM is not set
727CONFIG_BFIN_JTAG_COMM=m 792CONFIG_BFIN_JTAG_COMM=m
@@ -735,6 +800,7 @@ CONFIG_BFIN_JTAG_COMM=m
735# 800#
736# Non-8250 serial port support 801# Non-8250 serial port support
737# 802#
803# CONFIG_SERIAL_MAX3100 is not set
738CONFIG_SERIAL_BFIN=y 804CONFIG_SERIAL_BFIN=y
739CONFIG_SERIAL_BFIN_CONSOLE=y 805CONFIG_SERIAL_BFIN_CONSOLE=y
740CONFIG_SERIAL_BFIN_DMA=y 806CONFIG_SERIAL_BFIN_DMA=y
@@ -746,17 +812,8 @@ CONFIG_SERIAL_CORE=y
746CONFIG_SERIAL_CORE_CONSOLE=y 812CONFIG_SERIAL_CORE_CONSOLE=y
747# CONFIG_SERIAL_BFIN_SPORT is not set 813# CONFIG_SERIAL_BFIN_SPORT is not set
748CONFIG_UNIX98_PTYS=y 814CONFIG_UNIX98_PTYS=y
815# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
749# CONFIG_LEGACY_PTYS is not set 816# CONFIG_LEGACY_PTYS is not set
750
751#
752# CAN, the car bus and industrial fieldbus
753#
754CONFIG_CAN4LINUX=y
755
756#
757# linux embedded drivers
758#
759CONFIG_CAN_BLACKFIN=m
760# CONFIG_IPMI_HANDLER is not set 817# CONFIG_IPMI_HANDLER is not set
761# CONFIG_HW_RANDOM is not set 818# CONFIG_HW_RANDOM is not set
762# CONFIG_R3964 is not set 819# CONFIG_R3964 is not set
@@ -764,6 +821,7 @@ CONFIG_CAN_BLACKFIN=m
764# CONFIG_TCG_TPM is not set 821# CONFIG_TCG_TPM is not set
765CONFIG_I2C=m 822CONFIG_I2C=m
766CONFIG_I2C_BOARDINFO=y 823CONFIG_I2C_BOARDINFO=y
824CONFIG_I2C_COMPAT=y
767CONFIG_I2C_CHARDEV=m 825CONFIG_I2C_CHARDEV=m
768CONFIG_I2C_HELPER_AUTO=y 826CONFIG_I2C_HELPER_AUTO=y
769 827
@@ -796,14 +854,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
796# Miscellaneous I2C Chip support 854# Miscellaneous I2C Chip support
797# 855#
798# CONFIG_DS1682 is not set 856# CONFIG_DS1682 is not set
799# CONFIG_EEPROM_AT24 is not set
800CONFIG_SENSORS_AD5252=m
801# CONFIG_EEPROM_LEGACY is not set
802# CONFIG_SENSORS_PCF8574 is not set
803# CONFIG_PCF8575 is not set
804# CONFIG_SENSORS_PCA9539 is not set
805# CONFIG_SENSORS_PCF8591 is not set
806# CONFIG_SENSORS_MAX6875 is not set
807# CONFIG_SENSORS_TSL2550 is not set 857# CONFIG_SENSORS_TSL2550 is not set
808# CONFIG_I2C_DEBUG_CORE is not set 858# CONFIG_I2C_DEBUG_CORE is not set
809# CONFIG_I2C_DEBUG_ALGO is not set 859# CONFIG_I2C_DEBUG_ALGO is not set
@@ -820,13 +870,18 @@ CONFIG_SPI_BFIN=y
820# CONFIG_SPI_BFIN_LOCK is not set 870# CONFIG_SPI_BFIN_LOCK is not set
821# CONFIG_SPI_BFIN_SPORT is not set 871# CONFIG_SPI_BFIN_SPORT is not set
822# CONFIG_SPI_BITBANG is not set 872# CONFIG_SPI_BITBANG is not set
873# CONFIG_SPI_GPIO is not set
823 874
824# 875#
825# SPI Protocol Masters 876# SPI Protocol Masters
826# 877#
827# CONFIG_EEPROM_AT25 is not set
828# CONFIG_SPI_SPIDEV is not set 878# CONFIG_SPI_SPIDEV is not set
829# CONFIG_SPI_TLE62X0 is not set 879# CONFIG_SPI_TLE62X0 is not set
880
881#
882# PPS support
883#
884# CONFIG_PPS is not set
830CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 885CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
831CONFIG_GPIOLIB=y 886CONFIG_GPIOLIB=y
832# CONFIG_DEBUG_GPIO is not set 887# CONFIG_DEBUG_GPIO is not set
@@ -842,6 +897,7 @@ CONFIG_GPIO_SYSFS=y
842# CONFIG_GPIO_MAX732X is not set 897# CONFIG_GPIO_MAX732X is not set
843# CONFIG_GPIO_PCA953X is not set 898# CONFIG_GPIO_PCA953X is not set
844# CONFIG_GPIO_PCF857X is not set 899# CONFIG_GPIO_PCF857X is not set
900# CONFIG_GPIO_ADP5588 is not set
845 901
846# 902#
847# PCI GPIO expanders: 903# PCI GPIO expanders:
@@ -852,11 +908,15 @@ CONFIG_GPIO_SYSFS=y
852# 908#
853# CONFIG_GPIO_MAX7301 is not set 909# CONFIG_GPIO_MAX7301 is not set
854# CONFIG_GPIO_MCP23S08 is not set 910# CONFIG_GPIO_MCP23S08 is not set
911# CONFIG_GPIO_MC33880 is not set
912
913#
914# AC97 GPIO expanders:
915#
855# CONFIG_W1 is not set 916# CONFIG_W1 is not set
856# CONFIG_POWER_SUPPLY is not set 917# CONFIG_POWER_SUPPLY is not set
857# CONFIG_HWMON is not set 918# CONFIG_HWMON is not set
858# CONFIG_THERMAL is not set 919# CONFIG_THERMAL is not set
859# CONFIG_THERMAL_HWMON is not set
860CONFIG_WATCHDOG=y 920CONFIG_WATCHDOG=y
861# CONFIG_WATCHDOG_NOWAYOUT is not set 921# CONFIG_WATCHDOG_NOWAYOUT is not set
862 922
@@ -878,26 +938,18 @@ CONFIG_SSB_POSSIBLE=y
878# CONFIG_MFD_CORE is not set 938# CONFIG_MFD_CORE is not set
879# CONFIG_MFD_SM501 is not set 939# CONFIG_MFD_SM501 is not set
880# CONFIG_HTC_PASIC3 is not set 940# CONFIG_HTC_PASIC3 is not set
941# CONFIG_UCB1400_CORE is not set
942# CONFIG_TPS65010 is not set
881# CONFIG_MFD_TMIO is not set 943# CONFIG_MFD_TMIO is not set
882# CONFIG_MFD_WM8400 is not set 944# CONFIG_MFD_WM8400 is not set
945# CONFIG_MFD_WM831X is not set
883# CONFIG_MFD_WM8350_I2C is not set 946# CONFIG_MFD_WM8350_I2C is not set
947# CONFIG_MFD_PCF50633 is not set
948# CONFIG_MFD_MC13783 is not set
949# CONFIG_AB3100_CORE is not set
950# CONFIG_EZX_PCAP is not set
884# CONFIG_REGULATOR is not set 951# CONFIG_REGULATOR is not set
885 952# CONFIG_MEDIA_SUPPORT is not set
886#
887# Multimedia devices
888#
889
890#
891# Multimedia core support
892#
893# CONFIG_VIDEO_DEV is not set
894# CONFIG_DVB_CORE is not set
895# CONFIG_VIDEO_MEDIA is not set
896
897#
898# Multimedia drivers
899#
900# CONFIG_DAB is not set
901 953
902# 954#
903# Graphics support 955# Graphics support
@@ -929,9 +981,6 @@ CONFIG_FB_CFB_IMAGEBLIT=m
929# CONFIG_FB_BFIN_T350MCQB is not set 981# CONFIG_FB_BFIN_T350MCQB is not set
930# CONFIG_FB_BFIN_LQ035Q1 is not set 982# CONFIG_FB_BFIN_LQ035Q1 is not set
931CONFIG_FB_BF537_LQ035=m 983CONFIG_FB_BF537_LQ035=m
932CONFIG_LQ035_SLAVE_ADDR=0x58
933# CONFIG_FB_BFIN_LANDSCAPE is not set
934# CONFIG_FB_BFIN_BGR is not set
935CONFIG_FB_BFIN_7393=m 984CONFIG_FB_BFIN_7393=m
936CONFIG_NTSC=y 985CONFIG_NTSC=y
937# CONFIG_PAL is not set 986# CONFIG_PAL is not set
@@ -946,15 +995,18 @@ CONFIG_ADV7393_1XMEM=y
946# CONFIG_FB_VIRTUAL is not set 995# CONFIG_FB_VIRTUAL is not set
947# CONFIG_FB_METRONOME is not set 996# CONFIG_FB_METRONOME is not set
948# CONFIG_FB_MB862XX is not set 997# CONFIG_FB_MB862XX is not set
998# CONFIG_FB_BROADSHEET is not set
949CONFIG_BACKLIGHT_LCD_SUPPORT=y 999CONFIG_BACKLIGHT_LCD_SUPPORT=y
950CONFIG_LCD_CLASS_DEVICE=m 1000CONFIG_LCD_CLASS_DEVICE=m
1001# CONFIG_LCD_LMS283GF05 is not set
951# CONFIG_LCD_LTV350QV is not set 1002# CONFIG_LCD_LTV350QV is not set
952# CONFIG_LCD_ILI9320 is not set 1003# CONFIG_LCD_ILI9320 is not set
953# CONFIG_LCD_TDO24M is not set 1004# CONFIG_LCD_TDO24M is not set
954# CONFIG_LCD_VGG2432A4 is not set 1005# CONFIG_LCD_VGG2432A4 is not set
955# CONFIG_LCD_PLATFORM is not set 1006# CONFIG_LCD_PLATFORM is not set
956CONFIG_BACKLIGHT_CLASS_DEVICE=m 1007CONFIG_BACKLIGHT_CLASS_DEVICE=m
957CONFIG_BACKLIGHT_CORGI=m 1008CONFIG_BACKLIGHT_GENERIC=m
1009# CONFIG_BACKLIGHT_ADP8870 is not set
958 1010
959# 1011#
960# Display device support 1012# Display device support
@@ -963,19 +1015,27 @@ CONFIG_BACKLIGHT_CORGI=m
963# CONFIG_LOGO is not set 1015# CONFIG_LOGO is not set
964CONFIG_SOUND=m 1016CONFIG_SOUND=m
965CONFIG_SOUND_OSS_CORE=y 1017CONFIG_SOUND_OSS_CORE=y
1018CONFIG_SOUND_OSS_CORE_PRECLAIM=y
966CONFIG_SND=m 1019CONFIG_SND=m
967CONFIG_SND_TIMER=m 1020CONFIG_SND_TIMER=m
968CONFIG_SND_PCM=m 1021CONFIG_SND_PCM=m
1022CONFIG_SND_JACK=y
969# CONFIG_SND_SEQUENCER is not set 1023# CONFIG_SND_SEQUENCER is not set
970CONFIG_SND_OSSEMUL=y 1024CONFIG_SND_OSSEMUL=y
971CONFIG_SND_MIXER_OSS=m 1025CONFIG_SND_MIXER_OSS=m
972CONFIG_SND_PCM_OSS=m 1026CONFIG_SND_PCM_OSS=m
973CONFIG_SND_PCM_OSS_PLUGINS=y 1027CONFIG_SND_PCM_OSS_PLUGINS=y
1028# CONFIG_SND_HRTIMER is not set
974# CONFIG_SND_DYNAMIC_MINORS is not set 1029# CONFIG_SND_DYNAMIC_MINORS is not set
975CONFIG_SND_SUPPORT_OLD_API=y 1030CONFIG_SND_SUPPORT_OLD_API=y
976CONFIG_SND_VERBOSE_PROCFS=y 1031CONFIG_SND_VERBOSE_PROCFS=y
977# CONFIG_SND_VERBOSE_PRINTK is not set 1032# CONFIG_SND_VERBOSE_PRINTK is not set
978# CONFIG_SND_DEBUG is not set 1033# CONFIG_SND_DEBUG is not set
1034# CONFIG_SND_RAWMIDI_SEQ is not set
1035# CONFIG_SND_OPL3_LIB_SEQ is not set
1036# CONFIG_SND_OPL4_LIB_SEQ is not set
1037# CONFIG_SND_SBAWE_SEQ is not set
1038# CONFIG_SND_EMU10K1_SEQ is not set
979CONFIG_SND_DRIVERS=y 1039CONFIG_SND_DRIVERS=y
980# CONFIG_SND_DUMMY is not set 1040# CONFIG_SND_DUMMY is not set
981# CONFIG_SND_MTPAV is not set 1041# CONFIG_SND_MTPAV is not set
@@ -986,13 +1046,6 @@ CONFIG_SND_SPI=y
986# 1046#
987# ALSA Blackfin devices 1047# ALSA Blackfin devices
988# 1048#
989CONFIG_SND_BLACKFIN_AD1836=m
990CONFIG_SND_BLACKFIN_AD1836_TDM=y
991# CONFIG_SND_BLACKFIN_AD1836_I2S is not set
992CONFIG_SND_BLACKFIN_AD1836_MULSUB=y
993# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
994CONFIG_SND_BLACKFIN_SPORT=0
995CONFIG_SND_BLACKFIN_SPI_PFBIT=4
996CONFIG_SND_BFIN_SPORT=0 1049CONFIG_SND_BFIN_SPORT=0
997CONFIG_SND_BFIN_AD73322=m 1050CONFIG_SND_BFIN_AD73322=m
998CONFIG_SND_BFIN_AD73322_SPORT0_SE=10 1051CONFIG_SND_BFIN_AD73322_SPORT0_SE=10
@@ -1003,16 +1056,20 @@ CONFIG_SND_SOC_AC97_BUS=y
1003CONFIG_SND_BF5XX_I2S=m 1056CONFIG_SND_BF5XX_I2S=m
1004# CONFIG_SND_BF5XX_SOC_SSM2602 is not set 1057# CONFIG_SND_BF5XX_SOC_SSM2602 is not set
1005CONFIG_SND_BF5XX_SOC_AD73311=m 1058CONFIG_SND_BF5XX_SOC_AD73311=m
1059# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set
1060# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set
1006CONFIG_SND_BFIN_AD73311_SE=4 1061CONFIG_SND_BFIN_AD73311_SE=4
1062# CONFIG_SND_BF5XX_TDM is not set
1007CONFIG_SND_BF5XX_AC97=m 1063CONFIG_SND_BF5XX_AC97=m
1008CONFIG_SND_BF5XX_MMAP_SUPPORT=y 1064CONFIG_SND_BF5XX_MMAP_SUPPORT=y
1009# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set 1065# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
1066# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set
1067CONFIG_SND_BF5XX_SOC_AD1980=m
1010CONFIG_SND_BF5XX_SOC_SPORT=m 1068CONFIG_SND_BF5XX_SOC_SPORT=m
1011CONFIG_SND_BF5XX_SOC_I2S=m 1069CONFIG_SND_BF5XX_SOC_I2S=m
1012CONFIG_SND_BF5XX_SOC_AC97=m 1070CONFIG_SND_BF5XX_SOC_AC97=m
1013CONFIG_SND_BF5XX_SOC_AD1980=m
1014CONFIG_SND_BF5XX_SPORT_NUM=0 1071CONFIG_SND_BF5XX_SPORT_NUM=0
1015# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set 1072CONFIG_SND_SOC_I2C_AND_SPI=m
1016# CONFIG_SND_SOC_ALL_CODECS is not set 1073# CONFIG_SND_SOC_ALL_CODECS is not set
1017CONFIG_SND_SOC_AD1980=m 1074CONFIG_SND_SOC_AD1980=m
1018CONFIG_SND_SOC_AD73311=m 1075CONFIG_SND_SOC_AD73311=m
@@ -1020,14 +1077,12 @@ CONFIG_SND_SOC_AD73311=m
1020CONFIG_AC97_BUS=m 1077CONFIG_AC97_BUS=m
1021CONFIG_HID_SUPPORT=y 1078CONFIG_HID_SUPPORT=y
1022CONFIG_HID=y 1079CONFIG_HID=y
1023# CONFIG_HID_DEBUG is not set
1024# CONFIG_HIDRAW is not set 1080# CONFIG_HIDRAW is not set
1025# CONFIG_HID_PID is not set 1081# CONFIG_HID_PID is not set
1026 1082
1027# 1083#
1028# Special HID drivers 1084# Special HID drivers
1029# 1085#
1030CONFIG_HID_COMPAT=y
1031# CONFIG_USB_SUPPORT is not set 1086# CONFIG_USB_SUPPORT is not set
1032# CONFIG_MMC is not set 1087# CONFIG_MMC is not set
1033# CONFIG_MEMSTICK is not set 1088# CONFIG_MEMSTICK is not set
@@ -1064,6 +1119,7 @@ CONFIG_RTC_INTF_DEV=y
1064# CONFIG_RTC_DRV_S35390A is not set 1119# CONFIG_RTC_DRV_S35390A is not set
1065# CONFIG_RTC_DRV_FM3130 is not set 1120# CONFIG_RTC_DRV_FM3130 is not set
1066# CONFIG_RTC_DRV_RX8581 is not set 1121# CONFIG_RTC_DRV_RX8581 is not set
1122# CONFIG_RTC_DRV_RX8025 is not set
1067 1123
1068# 1124#
1069# SPI RTC drivers 1125# SPI RTC drivers
@@ -1075,6 +1131,7 @@ CONFIG_RTC_INTF_DEV=y
1075# CONFIG_RTC_DRV_R9701 is not set 1131# CONFIG_RTC_DRV_R9701 is not set
1076# CONFIG_RTC_DRV_RS5C348 is not set 1132# CONFIG_RTC_DRV_RS5C348 is not set
1077# CONFIG_RTC_DRV_DS3234 is not set 1133# CONFIG_RTC_DRV_DS3234 is not set
1134# CONFIG_RTC_DRV_PCF2123 is not set
1078 1135
1079# 1136#
1080# Platform RTC drivers 1137# Platform RTC drivers
@@ -1095,10 +1152,21 @@ CONFIG_RTC_INTF_DEV=y
1095# 1152#
1096CONFIG_RTC_DRV_BFIN=y 1153CONFIG_RTC_DRV_BFIN=y
1097# CONFIG_DMADEVICES is not set 1154# CONFIG_DMADEVICES is not set
1155# CONFIG_AUXDISPLAY is not set
1098# CONFIG_UIO is not set 1156# CONFIG_UIO is not set
1157
1158#
1159# TI VLYNQ
1160#
1099# CONFIG_STAGING is not set 1161# CONFIG_STAGING is not set
1100 1162
1101# 1163#
1164# Firmware Drivers
1165#
1166# CONFIG_FIRMWARE_MEMMAP is not set
1167# CONFIG_SIGMA is not set
1168
1169#
1102# File systems 1170# File systems
1103# 1171#
1104# CONFIG_EXT2_FS is not set 1172# CONFIG_EXT2_FS is not set
@@ -1107,9 +1175,13 @@ CONFIG_RTC_DRV_BFIN=y
1107# CONFIG_REISERFS_FS is not set 1175# CONFIG_REISERFS_FS is not set
1108# CONFIG_JFS_FS is not set 1176# CONFIG_JFS_FS is not set
1109# CONFIG_FS_POSIX_ACL is not set 1177# CONFIG_FS_POSIX_ACL is not set
1110CONFIG_FILE_LOCKING=y
1111# CONFIG_XFS_FS is not set 1178# CONFIG_XFS_FS is not set
1179# CONFIG_GFS2_FS is not set
1112# CONFIG_OCFS2_FS is not set 1180# CONFIG_OCFS2_FS is not set
1181# CONFIG_BTRFS_FS is not set
1182# CONFIG_NILFS2_FS is not set
1183CONFIG_FILE_LOCKING=y
1184CONFIG_FSNOTIFY=y
1113# CONFIG_DNOTIFY is not set 1185# CONFIG_DNOTIFY is not set
1114CONFIG_INOTIFY=y 1186CONFIG_INOTIFY=y
1115CONFIG_INOTIFY_USER=y 1187CONFIG_INOTIFY_USER=y
@@ -1119,6 +1191,11 @@ CONFIG_INOTIFY_USER=y
1119# CONFIG_FUSE_FS is not set 1191# CONFIG_FUSE_FS is not set
1120 1192
1121# 1193#
1194# Caches
1195#
1196# CONFIG_FSCACHE is not set
1197
1198#
1122# CD-ROM/DVD Filesystems 1199# CD-ROM/DVD Filesystems
1123# 1200#
1124# CONFIG_ISO9660_FS is not set 1201# CONFIG_ISO9660_FS is not set
@@ -1137,13 +1214,9 @@ CONFIG_INOTIFY_USER=y
1137CONFIG_PROC_FS=y 1214CONFIG_PROC_FS=y
1138CONFIG_PROC_SYSCTL=y 1215CONFIG_PROC_SYSCTL=y
1139CONFIG_SYSFS=y 1216CONFIG_SYSFS=y
1140# CONFIG_TMPFS is not set
1141# CONFIG_HUGETLB_PAGE is not set 1217# CONFIG_HUGETLB_PAGE is not set
1142# CONFIG_CONFIGFS_FS is not set 1218# CONFIG_CONFIGFS_FS is not set
1143 1219CONFIG_MISC_FILESYSTEMS=y
1144#
1145# Miscellaneous filesystems
1146#
1147# CONFIG_ADFS_FS is not set 1220# CONFIG_ADFS_FS is not set
1148# CONFIG_AFFS_FS is not set 1221# CONFIG_AFFS_FS is not set
1149# CONFIG_HFS_FS is not set 1222# CONFIG_HFS_FS is not set
@@ -1162,17 +1235,8 @@ CONFIG_JFFS2_ZLIB=y
1162# CONFIG_JFFS2_LZO is not set 1235# CONFIG_JFFS2_LZO is not set
1163CONFIG_JFFS2_RTIME=y 1236CONFIG_JFFS2_RTIME=y
1164# CONFIG_JFFS2_RUBIN is not set 1237# CONFIG_JFFS2_RUBIN is not set
1165CONFIG_YAFFS_FS=m
1166CONFIG_YAFFS_YAFFS1=y
1167# CONFIG_YAFFS_9BYTE_TAGS is not set
1168# CONFIG_YAFFS_DOES_ECC is not set
1169CONFIG_YAFFS_YAFFS2=y
1170CONFIG_YAFFS_AUTO_YAFFS2=y
1171# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1172# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1173# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1174CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1175# CONFIG_CRAMFS is not set 1238# CONFIG_CRAMFS is not set
1239# CONFIG_SQUASHFS is not set
1176# CONFIG_VXFS_FS is not set 1240# CONFIG_VXFS_FS is not set
1177# CONFIG_MINIX_FS is not set 1241# CONFIG_MINIX_FS is not set
1178# CONFIG_OMFS_FS is not set 1242# CONFIG_OMFS_FS is not set
@@ -1191,7 +1255,6 @@ CONFIG_LOCKD=m
1191CONFIG_LOCKD_V4=y 1255CONFIG_LOCKD_V4=y
1192CONFIG_NFS_COMMON=y 1256CONFIG_NFS_COMMON=y
1193CONFIG_SUNRPC=m 1257CONFIG_SUNRPC=m
1194# CONFIG_SUNRPC_REGISTER_V4 is not set
1195# CONFIG_RPCSEC_GSS_KRB5 is not set 1258# CONFIG_RPCSEC_GSS_KRB5 is not set
1196# CONFIG_RPCSEC_GSS_SPKM3 is not set 1259# CONFIG_RPCSEC_GSS_SPKM3 is not set
1197CONFIG_SMB_FS=m 1260CONFIG_SMB_FS=m
@@ -1256,14 +1319,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1256CONFIG_ENABLE_MUST_CHECK=y 1319CONFIG_ENABLE_MUST_CHECK=y
1257CONFIG_FRAME_WARN=1024 1320CONFIG_FRAME_WARN=1024
1258# CONFIG_MAGIC_SYSRQ is not set 1321# CONFIG_MAGIC_SYSRQ is not set
1322# CONFIG_STRIP_ASM_SYMS is not set
1259# CONFIG_UNUSED_SYMBOLS is not set 1323# CONFIG_UNUSED_SYMBOLS is not set
1260CONFIG_DEBUG_FS=y 1324CONFIG_DEBUG_FS=y
1261# CONFIG_HEADERS_CHECK is not set 1325# CONFIG_HEADERS_CHECK is not set
1326CONFIG_DEBUG_SECTION_MISMATCH=y
1262CONFIG_DEBUG_KERNEL=y 1327CONFIG_DEBUG_KERNEL=y
1263CONFIG_DEBUG_SHIRQ=y 1328CONFIG_DEBUG_SHIRQ=y
1264CONFIG_DETECT_SOFTLOCKUP=y 1329CONFIG_DETECT_SOFTLOCKUP=y
1265# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1330# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1266CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1331CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1332CONFIG_DETECT_HUNG_TASK=y
1333# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1334CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1267CONFIG_SCHED_DEBUG=y 1335CONFIG_SCHED_DEBUG=y
1268# CONFIG_SCHEDSTATS is not set 1336# CONFIG_SCHEDSTATS is not set
1269# CONFIG_TIMER_STATS is not set 1337# CONFIG_TIMER_STATS is not set
@@ -1271,31 +1339,39 @@ CONFIG_SCHED_DEBUG=y
1271# CONFIG_DEBUG_SLAB is not set 1339# CONFIG_DEBUG_SLAB is not set
1272# CONFIG_DEBUG_SPINLOCK is not set 1340# CONFIG_DEBUG_SPINLOCK is not set
1273# CONFIG_DEBUG_MUTEXES is not set 1341# CONFIG_DEBUG_MUTEXES is not set
1342# CONFIG_DEBUG_LOCK_ALLOC is not set
1343# CONFIG_PROVE_LOCKING is not set
1344# CONFIG_LOCK_STAT is not set
1274# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1345# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1275# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1346# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1276# CONFIG_DEBUG_KOBJECT is not set 1347# CONFIG_DEBUG_KOBJECT is not set
1277CONFIG_DEBUG_BUGVERBOSE=y 1348CONFIG_DEBUG_BUGVERBOSE=y
1278CONFIG_DEBUG_INFO=y 1349CONFIG_DEBUG_INFO=y
1279# CONFIG_DEBUG_VM is not set 1350# CONFIG_DEBUG_VM is not set
1351# CONFIG_DEBUG_NOMMU_REGIONS is not set
1280# CONFIG_DEBUG_WRITECOUNT is not set 1352# CONFIG_DEBUG_WRITECOUNT is not set
1281# CONFIG_DEBUG_MEMORY_INIT is not set 1353# CONFIG_DEBUG_MEMORY_INIT is not set
1282# CONFIG_DEBUG_LIST is not set 1354# CONFIG_DEBUG_LIST is not set
1283# CONFIG_DEBUG_SG is not set 1355# CONFIG_DEBUG_SG is not set
1356# CONFIG_DEBUG_NOTIFIERS is not set
1357# CONFIG_DEBUG_CREDENTIALS is not set
1284# CONFIG_FRAME_POINTER is not set 1358# CONFIG_FRAME_POINTER is not set
1285# CONFIG_BOOT_PRINTK_DELAY is not set 1359# CONFIG_BOOT_PRINTK_DELAY is not set
1286# CONFIG_RCU_TORTURE_TEST is not set 1360# CONFIG_RCU_TORTURE_TEST is not set
1287# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1361# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1288# CONFIG_BACKTRACE_SELF_TEST is not set 1362# CONFIG_BACKTRACE_SELF_TEST is not set
1289# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1363# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1364# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1290# CONFIG_FAULT_INJECTION is not set 1365# CONFIG_FAULT_INJECTION is not set
1291 1366# CONFIG_PAGE_POISONING is not set
1292# 1367CONFIG_HAVE_FUNCTION_TRACER=y
1293# Tracers 1368CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1294# 1369CONFIG_TRACING_SUPPORT=y
1295# CONFIG_SCHED_TRACER is not set 1370# CONFIG_FTRACE is not set
1296# CONFIG_CONTEXT_SWITCH_TRACER is not set 1371# CONFIG_BRANCH_PROFILE_NONE is not set
1297# CONFIG_BOOT_TRACER is not set 1372# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1298# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1373# CONFIG_PROFILE_ALL_BRANCHES is not set
1374# CONFIG_DYNAMIC_DEBUG is not set
1299# CONFIG_SAMPLES is not set 1375# CONFIG_SAMPLES is not set
1300CONFIG_HAVE_ARCH_KGDB=y 1376CONFIG_HAVE_ARCH_KGDB=y
1301# CONFIG_KGDB is not set 1377# CONFIG_KGDB is not set
@@ -1320,6 +1396,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1320CONFIG_EARLY_PRINTK=y 1396CONFIG_EARLY_PRINTK=y
1321CONFIG_CPLB_INFO=y 1397CONFIG_CPLB_INFO=y
1322CONFIG_ACCESS_CHECK=y 1398CONFIG_ACCESS_CHECK=y
1399# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1323 1400
1324# 1401#
1325# Security options 1402# Security options
@@ -1328,14 +1405,14 @@ CONFIG_ACCESS_CHECK=y
1328CONFIG_SECURITY=y 1405CONFIG_SECURITY=y
1329# CONFIG_SECURITYFS is not set 1406# CONFIG_SECURITYFS is not set
1330# CONFIG_SECURITY_NETWORK is not set 1407# CONFIG_SECURITY_NETWORK is not set
1408# CONFIG_SECURITY_PATH is not set
1331# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1409# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1332CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1410# CONFIG_SECURITY_TOMOYO is not set
1333CONFIG_CRYPTO=y 1411CONFIG_CRYPTO=y
1334 1412
1335# 1413#
1336# Crypto core or helper 1414# Crypto core or helper
1337# 1415#
1338# CONFIG_CRYPTO_FIPS is not set
1339# CONFIG_CRYPTO_MANAGER is not set 1416# CONFIG_CRYPTO_MANAGER is not set
1340# CONFIG_CRYPTO_MANAGER2 is not set 1417# CONFIG_CRYPTO_MANAGER2 is not set
1341# CONFIG_CRYPTO_GF128MUL is not set 1418# CONFIG_CRYPTO_GF128MUL is not set
@@ -1367,11 +1444,13 @@ CONFIG_CRYPTO=y
1367# 1444#
1368# CONFIG_CRYPTO_HMAC is not set 1445# CONFIG_CRYPTO_HMAC is not set
1369# CONFIG_CRYPTO_XCBC is not set 1446# CONFIG_CRYPTO_XCBC is not set
1447# CONFIG_CRYPTO_VMAC is not set
1370 1448
1371# 1449#
1372# Digest 1450# Digest
1373# 1451#
1374# CONFIG_CRYPTO_CRC32C is not set 1452# CONFIG_CRYPTO_CRC32C is not set
1453# CONFIG_CRYPTO_GHASH is not set
1375# CONFIG_CRYPTO_MD4 is not set 1454# CONFIG_CRYPTO_MD4 is not set
1376# CONFIG_CRYPTO_MD5 is not set 1455# CONFIG_CRYPTO_MD5 is not set
1377# CONFIG_CRYPTO_MICHAEL_MIC is not set 1456# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1408,6 +1487,7 @@ CONFIG_CRYPTO=y
1408# Compression 1487# Compression
1409# 1488#
1410# CONFIG_CRYPTO_DEFLATE is not set 1489# CONFIG_CRYPTO_DEFLATE is not set
1490# CONFIG_CRYPTO_ZLIB is not set
1411# CONFIG_CRYPTO_LZO is not set 1491# CONFIG_CRYPTO_LZO is not set
1412 1492
1413# 1493#
@@ -1415,11 +1495,13 @@ CONFIG_CRYPTO=y
1415# 1495#
1416# CONFIG_CRYPTO_ANSI_CPRNG is not set 1496# CONFIG_CRYPTO_ANSI_CPRNG is not set
1417CONFIG_CRYPTO_HW=y 1497CONFIG_CRYPTO_HW=y
1498# CONFIG_BINARY_PRINTF is not set
1418 1499
1419# 1500#
1420# Library routines 1501# Library routines
1421# 1502#
1422CONFIG_BITREVERSE=y 1503CONFIG_BITREVERSE=y
1504CONFIG_GENERIC_FIND_LAST_BIT=y
1423CONFIG_CRC_CCITT=m 1505CONFIG_CRC_CCITT=m
1424# CONFIG_CRC16 is not set 1506# CONFIG_CRC16 is not set
1425# CONFIG_CRC_T10DIF is not set 1507# CONFIG_CRC_T10DIF is not set
@@ -1429,6 +1511,8 @@ CONFIG_CRC32=y
1429# CONFIG_LIBCRC32C is not set 1511# CONFIG_LIBCRC32C is not set
1430CONFIG_ZLIB_INFLATE=y 1512CONFIG_ZLIB_INFLATE=y
1431CONFIG_ZLIB_DEFLATE=m 1513CONFIG_ZLIB_DEFLATE=m
1514CONFIG_DECOMPRESS_GZIP=y
1432CONFIG_HAS_IOMEM=y 1515CONFIG_HAS_IOMEM=y
1433CONFIG_HAS_IOPORT=y 1516CONFIG_HAS_IOPORT=y
1434CONFIG_HAS_DMA=y 1517CONFIG_HAS_DMA=y
1518CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index 09ea2499555e..bc1871d89fd5 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -1,22 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# Thu May 21 05:50:01 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
8CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
9# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
10CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
11CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
12CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
16CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
17CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
18CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
19CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
20 25
21# 26#
22# General setup 27# General setup
@@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y
26CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
27CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
28CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
29CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
30CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
31# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
32# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
33# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
34# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
35CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
36CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
37CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
38# CONFIG_CGROUPS is not set
39# CONFIG_GROUP_SCHED is not set 59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
40# CONFIG_SYSFS_DEPRECATED_V2 is not set 61# CONFIG_SYSFS_DEPRECATED_V2 is not set
41# CONFIG_RELAY is not set 62# CONFIG_RELAY is not set
42# CONFIG_NAMESPACES is not set 63# CONFIG_NAMESPACES is not set
43CONFIG_BLK_DEV_INITRD=y 64CONFIG_BLK_DEV_INITRD=y
44CONFIG_INITRAMFS_SOURCE="" 65CONFIG_INITRAMFS_SOURCE=""
66CONFIG_RD_GZIP=y
67# CONFIG_RD_BZIP2 is not set
68# CONFIG_RD_LZMA is not set
45# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
46CONFIG_SYSCTL=y 70CONFIG_SYSCTL=y
47CONFIG_ANON_INODES=y 71CONFIG_ANON_INODES=y
@@ -62,6 +86,10 @@ CONFIG_EPOLL=y
62# CONFIG_TIMERFD is not set 86# CONFIG_TIMERFD is not set
63# CONFIG_EVENTFD is not set 87# CONFIG_EVENTFD is not set
64# CONFIG_AIO is not set 88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
65CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
66CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
67CONFIG_SLAB=y 95CONFIG_SLAB=y
@@ -69,11 +97,15 @@ CONFIG_SLAB=y
69# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
70CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
71# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
72# CONFIG_MARKERS is not set
73CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
74# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
75CONFIG_SLABINFO=y 108CONFIG_SLABINFO=y
76CONFIG_TINY_SHMEM=y
77CONFIG_BASE_SMALL=0 109CONFIG_BASE_SMALL=0
78CONFIG_MODULES=y 110CONFIG_MODULES=y
79# CONFIG_MODULE_FORCE_LOAD is not set 111# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y
81# CONFIG_MODULE_FORCE_UNLOAD is not set 113# CONFIG_MODULE_FORCE_UNLOAD is not set
82# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
83# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
84CONFIG_KMOD=y
85CONFIG_BLOCK=y 116CONFIG_BLOCK=y
86# CONFIG_LBD is not set 117# CONFIG_LBDAF is not set
87# CONFIG_BLK_DEV_IO_TRACE is not set
88# CONFIG_LSF is not set
89# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
90# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
91 120
@@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y
101# CONFIG_DEFAULT_CFQ is not set 130# CONFIG_DEFAULT_CFQ is not set
102# CONFIG_DEFAULT_NOOP is not set 131# CONFIG_DEFAULT_NOOP is not set
103CONFIG_DEFAULT_IOSCHED="anticipatory" 132CONFIG_DEFAULT_IOSCHED="anticipatory"
104CONFIG_CLASSIC_RCU=y
105# CONFIG_PREEMPT_NONE is not set 133# CONFIG_PREEMPT_NONE is not set
106CONFIG_PREEMPT_VOLUNTARY=y 134CONFIG_PREEMPT_VOLUNTARY=y
107# CONFIG_PREEMPT is not set 135# CONFIG_PREEMPT is not set
@@ -132,15 +160,15 @@ CONFIG_PREEMPT_VOLUNTARY=y
132# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
133CONFIG_BF538=y 161CONFIG_BF538=y
134# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
135# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
136# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
137# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
138# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
139# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
140# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
141# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
142# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
143# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
144# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
145# CONFIG_BF561 is not set 173# CONFIG_BF561 is not set
146CONFIG_BF_REV_MIN=4 174CONFIG_BF_REV_MIN=4
@@ -246,7 +274,7 @@ CONFIG_GENERIC_TIME=y
246CONFIG_GENERIC_CLOCKEVENTS=y 274CONFIG_GENERIC_CLOCKEVENTS=y
247# CONFIG_TICKSOURCE_GPTMR0 is not set 275# CONFIG_TICKSOURCE_GPTMR0 is not set
248CONFIG_TICKSOURCE_CORETMR=y 276CONFIG_TICKSOURCE_CORETMR=y
249# CONFIG_CYCLES_CLOCKSOURCE is not set 277CONFIG_CYCLES_CLOCKSOURCE=y
250# CONFIG_GPTMR0_CLOCKSOURCE is not set 278# CONFIG_GPTMR0_CLOCKSOURCE is not set
251CONFIG_TICK_ONESHOT=y 279CONFIG_TICK_ONESHOT=y
252# CONFIG_NO_HZ is not set 280# CONFIG_NO_HZ is not set
@@ -298,7 +326,6 @@ CONFIG_FLATMEM=y
298CONFIG_FLAT_NODE_MEM_MAP=y 326CONFIG_FLAT_NODE_MEM_MAP=y
299CONFIG_PAGEFLAGS_EXTENDED=y 327CONFIG_PAGEFLAGS_EXTENDED=y
300CONFIG_SPLIT_PTLOCK_CPUS=4 328CONFIG_SPLIT_PTLOCK_CPUS=4
301# CONFIG_RESOURCES_64BIT is not set
302# CONFIG_PHYS_ADDR_T_64BIT is not set 329# CONFIG_PHYS_ADDR_T_64BIT is not set
303CONFIG_ZONE_DMA_FLAG=1 330CONFIG_ZONE_DMA_FLAG=1
304CONFIG_VIRT_TO_BUS=y 331CONFIG_VIRT_TO_BUS=y
@@ -307,16 +334,18 @@ CONFIG_BFIN_GPTIMERS=m
307# CONFIG_DMA_UNCACHED_4M is not set 334# CONFIG_DMA_UNCACHED_4M is not set
308# CONFIG_DMA_UNCACHED_2M is not set 335# CONFIG_DMA_UNCACHED_2M is not set
309CONFIG_DMA_UNCACHED_1M=y 336CONFIG_DMA_UNCACHED_1M=y
337# CONFIG_DMA_UNCACHED_512K is not set
338# CONFIG_DMA_UNCACHED_256K is not set
339# CONFIG_DMA_UNCACHED_128K is not set
310# CONFIG_DMA_UNCACHED_NONE is not set 340# CONFIG_DMA_UNCACHED_NONE is not set
311 341
312# 342#
313# Cache Support 343# Cache Support
314# 344#
315CONFIG_BFIN_ICACHE=y 345CONFIG_BFIN_ICACHE=y
316# CONFIG_BFIN_ICACHE_LOCK is not set 346CONFIG_BFIN_EXTMEM_ICACHEABLE=y
317CONFIG_BFIN_DCACHE=y 347CONFIG_BFIN_DCACHE=y
318# CONFIG_BFIN_DCACHE_BANKA is not set 348# CONFIG_BFIN_DCACHE_BANKA is not set
319CONFIG_BFIN_EXTMEM_ICACHEABLE=y
320CONFIG_BFIN_EXTMEM_DCACHEABLE=y 349CONFIG_BFIN_EXTMEM_DCACHEABLE=y
321CONFIG_BFIN_EXTMEM_WRITEBACK=y 350CONFIG_BFIN_EXTMEM_WRITEBACK=y
322# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 351# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -327,7 +356,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
327# CONFIG_MPU is not set 356# CONFIG_MPU is not set
328 357
329# 358#
330# Asynchonous Memory Configuration 359# Asynchronous Memory Configuration
331# 360#
332 361
333# 362#
@@ -383,11 +412,6 @@ CONFIG_NET=y
383CONFIG_PACKET=y 412CONFIG_PACKET=y
384# CONFIG_PACKET_MMAP is not set 413# CONFIG_PACKET_MMAP is not set
385CONFIG_UNIX=y 414CONFIG_UNIX=y
386CONFIG_XFRM=y
387# CONFIG_XFRM_USER is not set
388# CONFIG_XFRM_SUB_POLICY is not set
389# CONFIG_XFRM_MIGRATE is not set
390# CONFIG_XFRM_STATISTICS is not set
391# CONFIG_NET_KEY is not set 415# CONFIG_NET_KEY is not set
392CONFIG_INET=y 416CONFIG_INET=y
393# CONFIG_IP_MULTICAST is not set 417# CONFIG_IP_MULTICAST is not set
@@ -411,7 +435,6 @@ CONFIG_IP_PNP=y
411# CONFIG_INET_XFRM_MODE_BEET is not set 435# CONFIG_INET_XFRM_MODE_BEET is not set
412# CONFIG_INET_LRO is not set 436# CONFIG_INET_LRO is not set
413# CONFIG_INET_DIAG is not set 437# CONFIG_INET_DIAG is not set
414CONFIG_INET_TCP_DIAG=y
415# CONFIG_TCP_CONG_ADVANCED is not set 438# CONFIG_TCP_CONG_ADVANCED is not set
416CONFIG_TCP_CONG_CUBIC=y 439CONFIG_TCP_CONG_CUBIC=y
417CONFIG_DEFAULT_TCP_CONG="cubic" 440CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -422,6 +445,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
422# CONFIG_NETFILTER is not set 445# CONFIG_NETFILTER is not set
423# CONFIG_IP_DCCP is not set 446# CONFIG_IP_DCCP is not set
424# CONFIG_IP_SCTP is not set 447# CONFIG_IP_SCTP is not set
448# CONFIG_RDS is not set
425# CONFIG_TIPC is not set 449# CONFIG_TIPC is not set
426# CONFIG_ATM is not set 450# CONFIG_ATM is not set
427# CONFIG_BRIDGE is not set 451# CONFIG_BRIDGE is not set
@@ -435,14 +459,34 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
435# CONFIG_LAPB is not set 459# CONFIG_LAPB is not set
436# CONFIG_ECONET is not set 460# CONFIG_ECONET is not set
437# CONFIG_WAN_ROUTER is not set 461# CONFIG_WAN_ROUTER is not set
462# CONFIG_PHONET is not set
463# CONFIG_IEEE802154 is not set
438# CONFIG_NET_SCHED is not set 464# CONFIG_NET_SCHED is not set
465# CONFIG_DCB is not set
439 466
440# 467#
441# Network testing 468# Network testing
442# 469#
443# CONFIG_NET_PKTGEN is not set 470# CONFIG_NET_PKTGEN is not set
444# CONFIG_HAMRADIO is not set 471# CONFIG_HAMRADIO is not set
445# CONFIG_CAN is not set 472CONFIG_CAN=m
473CONFIG_CAN_RAW=m
474CONFIG_CAN_BCM=m
475
476#
477# CAN Device Drivers
478#
479# CONFIG_CAN_VCAN is not set
480CONFIG_CAN_DEV=m
481# CONFIG_CAN_CALC_BITTIMING is not set
482CONFIG_CAN_BFIN=m
483# CONFIG_CAN_SJA1000 is not set
484
485#
486# CAN USB interfaces
487#
488# CONFIG_CAN_EMS_USB is not set
489# CONFIG_CAN_DEBUG_DEVICES is not set
446CONFIG_IRDA=m 490CONFIG_IRDA=m
447 491
448# 492#
@@ -481,13 +525,8 @@ CONFIG_SIR_BFIN_DMA=y
481# 525#
482# CONFIG_BT is not set 526# CONFIG_BT is not set
483# CONFIG_AF_RXRPC is not set 527# CONFIG_AF_RXRPC is not set
484# CONFIG_PHONET is not set 528# CONFIG_WIRELESS is not set
485CONFIG_WIRELESS=y 529# CONFIG_WIMAX is not set
486# CONFIG_CFG80211 is not set
487CONFIG_WIRELESS_OLD_REGULATORY=y
488# CONFIG_WIRELESS_EXT is not set
489# CONFIG_MAC80211 is not set
490# CONFIG_IEEE80211 is not set
491# CONFIG_RFKILL is not set 530# CONFIG_RFKILL is not set
492# CONFIG_NET_9P is not set 531# CONFIG_NET_9P is not set
493 532
@@ -508,6 +547,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
508# CONFIG_CONNECTOR is not set 547# CONFIG_CONNECTOR is not set
509CONFIG_MTD=y 548CONFIG_MTD=y
510# CONFIG_MTD_DEBUG is not set 549# CONFIG_MTD_DEBUG is not set
550# CONFIG_MTD_TESTS is not set
511# CONFIG_MTD_CONCAT is not set 551# CONFIG_MTD_CONCAT is not set
512CONFIG_MTD_PARTITIONS=y 552CONFIG_MTD_PARTITIONS=y
513# CONFIG_MTD_REDBOOT_PARTS is not set 553# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -566,6 +606,7 @@ CONFIG_MTD_PHYSMAP=m
566# 606#
567# CONFIG_MTD_DATAFLASH is not set 607# CONFIG_MTD_DATAFLASH is not set
568# CONFIG_MTD_M25P80 is not set 608# CONFIG_MTD_M25P80 is not set
609# CONFIG_MTD_SST25L is not set
569# CONFIG_MTD_SLRAM is not set 610# CONFIG_MTD_SLRAM is not set
570# CONFIG_MTD_PHRAM is not set 611# CONFIG_MTD_PHRAM is not set
571# CONFIG_MTD_MTDRAM is not set 612# CONFIG_MTD_MTDRAM is not set
@@ -581,11 +622,6 @@ CONFIG_MTD_NAND=m
581# CONFIG_MTD_NAND_VERIFY_WRITE is not set 622# CONFIG_MTD_NAND_VERIFY_WRITE is not set
582# CONFIG_MTD_NAND_ECC_SMC is not set 623# CONFIG_MTD_NAND_ECC_SMC is not set
583# CONFIG_MTD_NAND_MUSEUM_IDS is not set 624# CONFIG_MTD_NAND_MUSEUM_IDS is not set
584CONFIG_MTD_NAND_BFIN=m
585CONFIG_BFIN_NAND_BASE=0x20212000
586CONFIG_BFIN_NAND_CLE=2
587CONFIG_BFIN_NAND_ALE=1
588CONFIG_BFIN_NAND_READY=3
589CONFIG_MTD_NAND_IDS=m 625CONFIG_MTD_NAND_IDS=m
590# CONFIG_MTD_NAND_DISKONCHIP is not set 626# CONFIG_MTD_NAND_DISKONCHIP is not set
591# CONFIG_MTD_NAND_NANDSIM is not set 627# CONFIG_MTD_NAND_NANDSIM is not set
@@ -593,6 +629,11 @@ CONFIG_MTD_NAND_IDS=m
593# CONFIG_MTD_ONENAND is not set 629# CONFIG_MTD_ONENAND is not set
594 630
595# 631#
632# LPDDR flash memory drivers
633#
634# CONFIG_MTD_LPDDR is not set
635
636#
596# UBI - Unsorted block images 637# UBI - Unsorted block images
597# 638#
598# CONFIG_MTD_UBI is not set 639# CONFIG_MTD_UBI is not set
@@ -643,14 +684,20 @@ CONFIG_SMSC_PHY=y
643# CONFIG_BROADCOM_PHY is not set 684# CONFIG_BROADCOM_PHY is not set
644# CONFIG_ICPLUS_PHY is not set 685# CONFIG_ICPLUS_PHY is not set
645# CONFIG_REALTEK_PHY is not set 686# CONFIG_REALTEK_PHY is not set
687# CONFIG_NATIONAL_PHY is not set
688# CONFIG_STE10XP is not set
689# CONFIG_LSI_ET1011C_PHY is not set
646# CONFIG_FIXED_PHY is not set 690# CONFIG_FIXED_PHY is not set
647# CONFIG_MDIO_BITBANG is not set 691# CONFIG_MDIO_BITBANG is not set
648CONFIG_NET_ETHERNET=y 692CONFIG_NET_ETHERNET=y
649CONFIG_MII=y 693CONFIG_MII=y
650CONFIG_SMC91X=y 694CONFIG_SMC91X=y
651# CONFIG_SMSC911X is not set
652# CONFIG_DM9000 is not set 695# CONFIG_DM9000 is not set
653# CONFIG_ENC28J60 is not set 696# CONFIG_ENC28J60 is not set
697# CONFIG_ETHOC is not set
698# CONFIG_SMSC911X is not set
699# CONFIG_DNET is not set
700# CONFIG_ADF702X is not set
654# CONFIG_IBM_NEW_EMAC_ZMII is not set 701# CONFIG_IBM_NEW_EMAC_ZMII is not set
655# CONFIG_IBM_NEW_EMAC_RGMII is not set 702# CONFIG_IBM_NEW_EMAC_RGMII is not set
656# CONFIG_IBM_NEW_EMAC_TAH is not set 703# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -659,15 +706,16 @@ CONFIG_SMC91X=y
659# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 706# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
660# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 707# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
661# CONFIG_B44 is not set 708# CONFIG_B44 is not set
709# CONFIG_KS8842 is not set
710# CONFIG_KS8851 is not set
711# CONFIG_KS8851_MLL is not set
662# CONFIG_NETDEV_1000 is not set 712# CONFIG_NETDEV_1000 is not set
663# CONFIG_NETDEV_10000 is not set 713# CONFIG_NETDEV_10000 is not set
714# CONFIG_WLAN is not set
664 715
665# 716#
666# Wireless LAN 717# Enable WiMAX (Networking options) to see the WiMAX drivers
667# 718#
668# CONFIG_WLAN_PRE80211 is not set
669# CONFIG_WLAN_80211 is not set
670# CONFIG_IWLWIFI_LEDS is not set
671# CONFIG_WAN is not set 719# CONFIG_WAN is not set
672# CONFIG_PPP is not set 720# CONFIG_PPP is not set
673# CONFIG_SLIP is not set 721# CONFIG_SLIP is not set
@@ -700,14 +748,17 @@ CONFIG_INPUT_EVDEV=m
700# CONFIG_INPUT_JOYSTICK is not set 748# CONFIG_INPUT_JOYSTICK is not set
701# CONFIG_INPUT_TABLET is not set 749# CONFIG_INPUT_TABLET is not set
702CONFIG_INPUT_TOUCHSCREEN=y 750CONFIG_INPUT_TOUCHSCREEN=y
751# CONFIG_TOUCHSCREEN_ADS7846 is not set
703# CONFIG_TOUCHSCREEN_AD7877 is not set 752# CONFIG_TOUCHSCREEN_AD7877 is not set
704# CONFIG_TOUCHSCREEN_AD7879_I2C is not set 753# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
705CONFIG_TOUCHSCREEN_AD7879_SPI=y 754CONFIG_TOUCHSCREEN_AD7879_SPI=y
706CONFIG_TOUCHSCREEN_AD7879=y 755CONFIG_TOUCHSCREEN_AD7879=y
707# CONFIG_TOUCHSCREEN_ADS7846 is not set 756# CONFIG_TOUCHSCREEN_EETI is not set
708# CONFIG_TOUCHSCREEN_FUJITSU is not set 757# CONFIG_TOUCHSCREEN_FUJITSU is not set
709# CONFIG_TOUCHSCREEN_GUNZE is not set 758# CONFIG_TOUCHSCREEN_GUNZE is not set
710# CONFIG_TOUCHSCREEN_ELO is not set 759# CONFIG_TOUCHSCREEN_ELO is not set
760# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
761# CONFIG_TOUCHSCREEN_MCS5000 is not set
711# CONFIG_TOUCHSCREEN_MTOUCH is not set 762# CONFIG_TOUCHSCREEN_MTOUCH is not set
712# CONFIG_TOUCHSCREEN_INEXIO is not set 763# CONFIG_TOUCHSCREEN_INEXIO is not set
713# CONFIG_TOUCHSCREEN_MK712 is not set 764# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -715,9 +766,13 @@ CONFIG_TOUCHSCREEN_AD7879=y
715# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set 766# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
716# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 767# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
717# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set 768# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
769# CONFIG_TOUCHSCREEN_TSC2007 is not set
718CONFIG_INPUT_MISC=y 770CONFIG_INPUT_MISC=y
719# CONFIG_INPUT_UINPUT is not set 771# CONFIG_INPUT_UINPUT is not set
720# CONFIG_CONFIG_INPUT_PCF8574 is not set 772# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
773# CONFIG_INPUT_AD714X is not set
774# CONFIG_INPUT_ADXL34X is not set
775# CONFIG_INPUT_PCF8574 is not set
721 776
722# 777#
723# Hardware I/O ports 778# Hardware I/O ports
@@ -728,16 +783,13 @@ CONFIG_INPUT_MISC=y
728# 783#
729# Character devices 784# Character devices
730# 785#
731# CONFIG_AD9960 is not set
732CONFIG_BFIN_DMA_INTERFACE=m 786CONFIG_BFIN_DMA_INTERFACE=m
733# CONFIG_BFIN_PPI is not set 787# CONFIG_BFIN_PPI is not set
734# CONFIG_BFIN_PPIFCD is not set 788# CONFIG_BFIN_PPIFCD is not set
735# CONFIG_BFIN_SIMPLE_TIMER is not set 789# CONFIG_BFIN_SIMPLE_TIMER is not set
736# CONFIG_BFIN_SPI_ADC is not set 790# CONFIG_BFIN_SPI_ADC is not set
737CONFIG_BFIN_SPORT=m 791CONFIG_BFIN_SPORT=m
738# CONFIG_BFIN_TIMER_LATENCY is not set
739# CONFIG_BFIN_TWI_LCD is not set 792# CONFIG_BFIN_TWI_LCD is not set
740CONFIG_SIMPLE_GPIO=m
741# CONFIG_VT is not set 793# CONFIG_VT is not set
742# CONFIG_DEVKMEM is not set 794# CONFIG_DEVKMEM is not set
743CONFIG_BFIN_JTAG_COMM=m 795CONFIG_BFIN_JTAG_COMM=m
@@ -751,6 +803,7 @@ CONFIG_BFIN_JTAG_COMM=m
751# 803#
752# Non-8250 serial port support 804# Non-8250 serial port support
753# 805#
806# CONFIG_SERIAL_MAX3100 is not set
754CONFIG_SERIAL_BFIN=y 807CONFIG_SERIAL_BFIN=y
755CONFIG_SERIAL_BFIN_CONSOLE=y 808CONFIG_SERIAL_BFIN_CONSOLE=y
756CONFIG_SERIAL_BFIN_DMA=y 809CONFIG_SERIAL_BFIN_DMA=y
@@ -765,12 +818,8 @@ CONFIG_SERIAL_CORE=y
765CONFIG_SERIAL_CORE_CONSOLE=y 818CONFIG_SERIAL_CORE_CONSOLE=y
766# CONFIG_SERIAL_BFIN_SPORT is not set 819# CONFIG_SERIAL_BFIN_SPORT is not set
767CONFIG_UNIX98_PTYS=y 820CONFIG_UNIX98_PTYS=y
821# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
768# CONFIG_LEGACY_PTYS is not set 822# CONFIG_LEGACY_PTYS is not set
769
770#
771# CAN, the car bus and industrial fieldbus
772#
773# CONFIG_CAN4LINUX is not set
774# CONFIG_IPMI_HANDLER is not set 823# CONFIG_IPMI_HANDLER is not set
775# CONFIG_HW_RANDOM is not set 824# CONFIG_HW_RANDOM is not set
776# CONFIG_R3964 is not set 825# CONFIG_R3964 is not set
@@ -778,6 +827,7 @@ CONFIG_UNIX98_PTYS=y
778# CONFIG_TCG_TPM is not set 827# CONFIG_TCG_TPM is not set
779CONFIG_I2C=m 828CONFIG_I2C=m
780CONFIG_I2C_BOARDINFO=y 829CONFIG_I2C_BOARDINFO=y
830CONFIG_I2C_COMPAT=y
781# CONFIG_I2C_CHARDEV is not set 831# CONFIG_I2C_CHARDEV is not set
782CONFIG_I2C_HELPER_AUTO=y 832CONFIG_I2C_HELPER_AUTO=y
783 833
@@ -810,14 +860,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
810# Miscellaneous I2C Chip support 860# Miscellaneous I2C Chip support
811# 861#
812# CONFIG_DS1682 is not set 862# CONFIG_DS1682 is not set
813# CONFIG_EEPROM_AT24 is not set
814# CONFIG_SENSORS_AD5252 is not set
815# CONFIG_EEPROM_LEGACY is not set
816# CONFIG_SENSORS_PCF8574 is not set
817# CONFIG_PCF8575 is not set
818# CONFIG_SENSORS_PCA9539 is not set
819# CONFIG_SENSORS_PCF8591 is not set
820# CONFIG_SENSORS_MAX6875 is not set
821# CONFIG_SENSORS_TSL2550 is not set 863# CONFIG_SENSORS_TSL2550 is not set
822# CONFIG_I2C_DEBUG_CORE is not set 864# CONFIG_I2C_DEBUG_CORE is not set
823# CONFIG_I2C_DEBUG_ALGO is not set 865# CONFIG_I2C_DEBUG_ALGO is not set
@@ -834,13 +876,18 @@ CONFIG_SPI_BFIN=y
834# CONFIG_SPI_BFIN_LOCK is not set 876# CONFIG_SPI_BFIN_LOCK is not set
835# CONFIG_SPI_BFIN_SPORT is not set 877# CONFIG_SPI_BFIN_SPORT is not set
836# CONFIG_SPI_BITBANG is not set 878# CONFIG_SPI_BITBANG is not set
879# CONFIG_SPI_GPIO is not set
837 880
838# 881#
839# SPI Protocol Masters 882# SPI Protocol Masters
840# 883#
841# CONFIG_EEPROM_AT25 is not set
842# CONFIG_SPI_SPIDEV is not set 884# CONFIG_SPI_SPIDEV is not set
843# CONFIG_SPI_TLE62X0 is not set 885# CONFIG_SPI_TLE62X0 is not set
886
887#
888# PPS support
889#
890# CONFIG_PPS is not set
844CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 891CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
845CONFIG_GPIOLIB=y 892CONFIG_GPIOLIB=y
846# CONFIG_DEBUG_GPIO is not set 893# CONFIG_DEBUG_GPIO is not set
@@ -856,6 +903,7 @@ CONFIG_GPIO_SYSFS=y
856# CONFIG_GPIO_MAX732X is not set 903# CONFIG_GPIO_MAX732X is not set
857# CONFIG_GPIO_PCA953X is not set 904# CONFIG_GPIO_PCA953X is not set
858# CONFIG_GPIO_PCF857X is not set 905# CONFIG_GPIO_PCF857X is not set
906# CONFIG_GPIO_ADP5588 is not set
859 907
860# 908#
861# PCI GPIO expanders: 909# PCI GPIO expanders:
@@ -866,11 +914,15 @@ CONFIG_GPIO_SYSFS=y
866# 914#
867# CONFIG_GPIO_MAX7301 is not set 915# CONFIG_GPIO_MAX7301 is not set
868# CONFIG_GPIO_MCP23S08 is not set 916# CONFIG_GPIO_MCP23S08 is not set
917# CONFIG_GPIO_MC33880 is not set
918
919#
920# AC97 GPIO expanders:
921#
869# CONFIG_W1 is not set 922# CONFIG_W1 is not set
870# CONFIG_POWER_SUPPLY is not set 923# CONFIG_POWER_SUPPLY is not set
871# CONFIG_HWMON is not set 924# CONFIG_HWMON is not set
872# CONFIG_THERMAL is not set 925# CONFIG_THERMAL is not set
873# CONFIG_THERMAL_HWMON is not set
874CONFIG_WATCHDOG=y 926CONFIG_WATCHDOG=y
875# CONFIG_WATCHDOG_NOWAYOUT is not set 927# CONFIG_WATCHDOG_NOWAYOUT is not set
876 928
@@ -892,26 +944,17 @@ CONFIG_SSB_POSSIBLE=y
892# CONFIG_MFD_CORE is not set 944# CONFIG_MFD_CORE is not set
893# CONFIG_MFD_SM501 is not set 945# CONFIG_MFD_SM501 is not set
894# CONFIG_HTC_PASIC3 is not set 946# CONFIG_HTC_PASIC3 is not set
947# CONFIG_TPS65010 is not set
895# CONFIG_MFD_TMIO is not set 948# CONFIG_MFD_TMIO is not set
896# CONFIG_MFD_WM8400 is not set 949# CONFIG_MFD_WM8400 is not set
950# CONFIG_MFD_WM831X is not set
897# CONFIG_MFD_WM8350_I2C is not set 951# CONFIG_MFD_WM8350_I2C is not set
952# CONFIG_MFD_PCF50633 is not set
953# CONFIG_MFD_MC13783 is not set
954# CONFIG_AB3100_CORE is not set
955# CONFIG_EZX_PCAP is not set
898# CONFIG_REGULATOR is not set 956# CONFIG_REGULATOR is not set
899 957# CONFIG_MEDIA_SUPPORT is not set
900#
901# Multimedia devices
902#
903
904#
905# Multimedia core support
906#
907# CONFIG_VIDEO_DEV is not set
908# CONFIG_DVB_CORE is not set
909# CONFIG_VIDEO_MEDIA is not set
910
911#
912# Multimedia drivers
913#
914# CONFIG_DAB is not set
915 958
916# 959#
917# Graphics support 960# Graphics support
@@ -947,6 +990,7 @@ CONFIG_FB_BFIN_LQ035Q1=m
947# CONFIG_FB_VIRTUAL is not set 990# CONFIG_FB_VIRTUAL is not set
948# CONFIG_FB_METRONOME is not set 991# CONFIG_FB_METRONOME is not set
949# CONFIG_FB_MB862XX is not set 992# CONFIG_FB_MB862XX is not set
993# CONFIG_FB_BROADSHEET is not set
950# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 994# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
951 995
952# 996#
@@ -957,14 +1001,12 @@ CONFIG_FB_BFIN_LQ035Q1=m
957# CONFIG_SOUND is not set 1001# CONFIG_SOUND is not set
958CONFIG_HID_SUPPORT=y 1002CONFIG_HID_SUPPORT=y
959CONFIG_HID=y 1003CONFIG_HID=y
960# CONFIG_HID_DEBUG is not set
961# CONFIG_HIDRAW is not set 1004# CONFIG_HIDRAW is not set
962# CONFIG_HID_PID is not set 1005# CONFIG_HID_PID is not set
963 1006
964# 1007#
965# Special HID drivers 1008# Special HID drivers
966# 1009#
967CONFIG_HID_COMPAT=y
968# CONFIG_USB_SUPPORT is not set 1010# CONFIG_USB_SUPPORT is not set
969# CONFIG_MMC is not set 1011# CONFIG_MMC is not set
970# CONFIG_MEMSTICK is not set 1012# CONFIG_MEMSTICK is not set
@@ -1001,6 +1043,7 @@ CONFIG_RTC_INTF_DEV=y
1001# CONFIG_RTC_DRV_S35390A is not set 1043# CONFIG_RTC_DRV_S35390A is not set
1002# CONFIG_RTC_DRV_FM3130 is not set 1044# CONFIG_RTC_DRV_FM3130 is not set
1003# CONFIG_RTC_DRV_RX8581 is not set 1045# CONFIG_RTC_DRV_RX8581 is not set
1046# CONFIG_RTC_DRV_RX8025 is not set
1004 1047
1005# 1048#
1006# SPI RTC drivers 1049# SPI RTC drivers
@@ -1012,6 +1055,7 @@ CONFIG_RTC_INTF_DEV=y
1012# CONFIG_RTC_DRV_R9701 is not set 1055# CONFIG_RTC_DRV_R9701 is not set
1013# CONFIG_RTC_DRV_RS5C348 is not set 1056# CONFIG_RTC_DRV_RS5C348 is not set
1014# CONFIG_RTC_DRV_DS3234 is not set 1057# CONFIG_RTC_DRV_DS3234 is not set
1058# CONFIG_RTC_DRV_PCF2123 is not set
1015 1059
1016# 1060#
1017# Platform RTC drivers 1061# Platform RTC drivers
@@ -1032,10 +1076,21 @@ CONFIG_RTC_INTF_DEV=y
1032# 1076#
1033CONFIG_RTC_DRV_BFIN=y 1077CONFIG_RTC_DRV_BFIN=y
1034# CONFIG_DMADEVICES is not set 1078# CONFIG_DMADEVICES is not set
1079# CONFIG_AUXDISPLAY is not set
1035# CONFIG_UIO is not set 1080# CONFIG_UIO is not set
1081
1082#
1083# TI VLYNQ
1084#
1036# CONFIG_STAGING is not set 1085# CONFIG_STAGING is not set
1037 1086
1038# 1087#
1088# Firmware Drivers
1089#
1090# CONFIG_FIRMWARE_MEMMAP is not set
1091# CONFIG_SIGMA is not set
1092
1093#
1039# File systems 1094# File systems
1040# 1095#
1041# CONFIG_EXT2_FS is not set 1096# CONFIG_EXT2_FS is not set
@@ -1044,9 +1099,13 @@ CONFIG_RTC_DRV_BFIN=y
1044# CONFIG_REISERFS_FS is not set 1099# CONFIG_REISERFS_FS is not set
1045# CONFIG_JFS_FS is not set 1100# CONFIG_JFS_FS is not set
1046# CONFIG_FS_POSIX_ACL is not set 1101# CONFIG_FS_POSIX_ACL is not set
1047CONFIG_FILE_LOCKING=y
1048# CONFIG_XFS_FS is not set 1102# CONFIG_XFS_FS is not set
1103# CONFIG_GFS2_FS is not set
1049# CONFIG_OCFS2_FS is not set 1104# CONFIG_OCFS2_FS is not set
1105# CONFIG_BTRFS_FS is not set
1106# CONFIG_NILFS2_FS is not set
1107CONFIG_FILE_LOCKING=y
1108CONFIG_FSNOTIFY=y
1050# CONFIG_DNOTIFY is not set 1109# CONFIG_DNOTIFY is not set
1051CONFIG_INOTIFY=y 1110CONFIG_INOTIFY=y
1052CONFIG_INOTIFY_USER=y 1111CONFIG_INOTIFY_USER=y
@@ -1056,6 +1115,11 @@ CONFIG_INOTIFY_USER=y
1056# CONFIG_FUSE_FS is not set 1115# CONFIG_FUSE_FS is not set
1057 1116
1058# 1117#
1118# Caches
1119#
1120# CONFIG_FSCACHE is not set
1121
1122#
1059# CD-ROM/DVD Filesystems 1123# CD-ROM/DVD Filesystems
1060# 1124#
1061# CONFIG_ISO9660_FS is not set 1125# CONFIG_ISO9660_FS is not set
@@ -1074,13 +1138,9 @@ CONFIG_INOTIFY_USER=y
1074CONFIG_PROC_FS=y 1138CONFIG_PROC_FS=y
1075CONFIG_PROC_SYSCTL=y 1139CONFIG_PROC_SYSCTL=y
1076CONFIG_SYSFS=y 1140CONFIG_SYSFS=y
1077# CONFIG_TMPFS is not set
1078# CONFIG_HUGETLB_PAGE is not set 1141# CONFIG_HUGETLB_PAGE is not set
1079# CONFIG_CONFIGFS_FS is not set 1142# CONFIG_CONFIGFS_FS is not set
1080 1143CONFIG_MISC_FILESYSTEMS=y
1081#
1082# Miscellaneous filesystems
1083#
1084# CONFIG_ADFS_FS is not set 1144# CONFIG_ADFS_FS is not set
1085# CONFIG_AFFS_FS is not set 1145# CONFIG_AFFS_FS is not set
1086# CONFIG_HFS_FS is not set 1146# CONFIG_HFS_FS is not set
@@ -1099,17 +1159,8 @@ CONFIG_JFFS2_ZLIB=y
1099# CONFIG_JFFS2_LZO is not set 1159# CONFIG_JFFS2_LZO is not set
1100CONFIG_JFFS2_RTIME=y 1160CONFIG_JFFS2_RTIME=y
1101# CONFIG_JFFS2_RUBIN is not set 1161# CONFIG_JFFS2_RUBIN is not set
1102CONFIG_YAFFS_FS=m
1103CONFIG_YAFFS_YAFFS1=y
1104# CONFIG_YAFFS_9BYTE_TAGS is not set
1105# CONFIG_YAFFS_DOES_ECC is not set
1106CONFIG_YAFFS_YAFFS2=y
1107CONFIG_YAFFS_AUTO_YAFFS2=y
1108# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1109# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1110# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1111CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1112# CONFIG_CRAMFS is not set 1162# CONFIG_CRAMFS is not set
1163# CONFIG_SQUASHFS is not set
1113# CONFIG_VXFS_FS is not set 1164# CONFIG_VXFS_FS is not set
1114# CONFIG_MINIX_FS is not set 1165# CONFIG_MINIX_FS is not set
1115# CONFIG_OMFS_FS is not set 1166# CONFIG_OMFS_FS is not set
@@ -1128,7 +1179,6 @@ CONFIG_LOCKD=m
1128CONFIG_LOCKD_V4=y 1179CONFIG_LOCKD_V4=y
1129CONFIG_NFS_COMMON=y 1180CONFIG_NFS_COMMON=y
1130CONFIG_SUNRPC=m 1181CONFIG_SUNRPC=m
1131# CONFIG_SUNRPC_REGISTER_V4 is not set
1132# CONFIG_RPCSEC_GSS_KRB5 is not set 1182# CONFIG_RPCSEC_GSS_KRB5 is not set
1133# CONFIG_RPCSEC_GSS_SPKM3 is not set 1183# CONFIG_RPCSEC_GSS_SPKM3 is not set
1134CONFIG_SMB_FS=m 1184CONFIG_SMB_FS=m
@@ -1193,14 +1243,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1193CONFIG_ENABLE_MUST_CHECK=y 1243CONFIG_ENABLE_MUST_CHECK=y
1194CONFIG_FRAME_WARN=1024 1244CONFIG_FRAME_WARN=1024
1195# CONFIG_MAGIC_SYSRQ is not set 1245# CONFIG_MAGIC_SYSRQ is not set
1246# CONFIG_STRIP_ASM_SYMS is not set
1196# CONFIG_UNUSED_SYMBOLS is not set 1247# CONFIG_UNUSED_SYMBOLS is not set
1197CONFIG_DEBUG_FS=y 1248CONFIG_DEBUG_FS=y
1198# CONFIG_HEADERS_CHECK is not set 1249# CONFIG_HEADERS_CHECK is not set
1250CONFIG_DEBUG_SECTION_MISMATCH=y
1199CONFIG_DEBUG_KERNEL=y 1251CONFIG_DEBUG_KERNEL=y
1200CONFIG_DEBUG_SHIRQ=y 1252CONFIG_DEBUG_SHIRQ=y
1201CONFIG_DETECT_SOFTLOCKUP=y 1253CONFIG_DETECT_SOFTLOCKUP=y
1202# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set 1254# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1203CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 1255CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1256CONFIG_DETECT_HUNG_TASK=y
1257# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1258CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1204CONFIG_SCHED_DEBUG=y 1259CONFIG_SCHED_DEBUG=y
1205# CONFIG_SCHEDSTATS is not set 1260# CONFIG_SCHEDSTATS is not set
1206# CONFIG_TIMER_STATS is not set 1261# CONFIG_TIMER_STATS is not set
@@ -1208,31 +1263,39 @@ CONFIG_SCHED_DEBUG=y
1208# CONFIG_DEBUG_SLAB is not set 1263# CONFIG_DEBUG_SLAB is not set
1209# CONFIG_DEBUG_SPINLOCK is not set 1264# CONFIG_DEBUG_SPINLOCK is not set
1210# CONFIG_DEBUG_MUTEXES is not set 1265# CONFIG_DEBUG_MUTEXES is not set
1266# CONFIG_DEBUG_LOCK_ALLOC is not set
1267# CONFIG_PROVE_LOCKING is not set
1268# CONFIG_LOCK_STAT is not set
1211# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1269# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1212# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1270# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1213# CONFIG_DEBUG_KOBJECT is not set 1271# CONFIG_DEBUG_KOBJECT is not set
1214CONFIG_DEBUG_BUGVERBOSE=y 1272CONFIG_DEBUG_BUGVERBOSE=y
1215CONFIG_DEBUG_INFO=y 1273CONFIG_DEBUG_INFO=y
1216# CONFIG_DEBUG_VM is not set 1274# CONFIG_DEBUG_VM is not set
1275# CONFIG_DEBUG_NOMMU_REGIONS is not set
1217# CONFIG_DEBUG_WRITECOUNT is not set 1276# CONFIG_DEBUG_WRITECOUNT is not set
1218# CONFIG_DEBUG_MEMORY_INIT is not set 1277# CONFIG_DEBUG_MEMORY_INIT is not set
1219# CONFIG_DEBUG_LIST is not set 1278# CONFIG_DEBUG_LIST is not set
1220# CONFIG_DEBUG_SG is not set 1279# CONFIG_DEBUG_SG is not set
1280# CONFIG_DEBUG_NOTIFIERS is not set
1281# CONFIG_DEBUG_CREDENTIALS is not set
1221# CONFIG_FRAME_POINTER is not set 1282# CONFIG_FRAME_POINTER is not set
1222# CONFIG_BOOT_PRINTK_DELAY is not set 1283# CONFIG_BOOT_PRINTK_DELAY is not set
1223# CONFIG_RCU_TORTURE_TEST is not set 1284# CONFIG_RCU_TORTURE_TEST is not set
1224# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1285# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1225# CONFIG_BACKTRACE_SELF_TEST is not set 1286# CONFIG_BACKTRACE_SELF_TEST is not set
1226# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1287# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1288# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1227# CONFIG_FAULT_INJECTION is not set 1289# CONFIG_FAULT_INJECTION is not set
1228 1290# CONFIG_PAGE_POISONING is not set
1229# 1291CONFIG_HAVE_FUNCTION_TRACER=y
1230# Tracers 1292CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1231# 1293CONFIG_TRACING_SUPPORT=y
1232# CONFIG_SCHED_TRACER is not set 1294# CONFIG_FTRACE is not set
1233# CONFIG_CONTEXT_SWITCH_TRACER is not set 1295# CONFIG_BRANCH_PROFILE_NONE is not set
1234# CONFIG_BOOT_TRACER is not set 1296# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1235# CONFIG_DYNAMIC_PRINTK_DEBUG is not set 1297# CONFIG_PROFILE_ALL_BRANCHES is not set
1298# CONFIG_DYNAMIC_DEBUG is not set
1236# CONFIG_SAMPLES is not set 1299# CONFIG_SAMPLES is not set
1237CONFIG_HAVE_ARCH_KGDB=y 1300CONFIG_HAVE_ARCH_KGDB=y
1238# CONFIG_KGDB is not set 1301# CONFIG_KGDB is not set
@@ -1257,6 +1320,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1257CONFIG_EARLY_PRINTK=y 1320CONFIG_EARLY_PRINTK=y
1258CONFIG_CPLB_INFO=y 1321CONFIG_CPLB_INFO=y
1259CONFIG_ACCESS_CHECK=y 1322CONFIG_ACCESS_CHECK=y
1323# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1260 1324
1261# 1325#
1262# Security options 1326# Security options
@@ -1265,14 +1329,14 @@ CONFIG_ACCESS_CHECK=y
1265CONFIG_SECURITY=y 1329CONFIG_SECURITY=y
1266# CONFIG_SECURITYFS is not set 1330# CONFIG_SECURITYFS is not set
1267# CONFIG_SECURITY_NETWORK is not set 1331# CONFIG_SECURITY_NETWORK is not set
1332# CONFIG_SECURITY_PATH is not set
1268# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1333# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1269CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1334# CONFIG_SECURITY_TOMOYO is not set
1270CONFIG_CRYPTO=y 1335CONFIG_CRYPTO=y
1271 1336
1272# 1337#
1273# Crypto core or helper 1338# Crypto core or helper
1274# 1339#
1275# CONFIG_CRYPTO_FIPS is not set
1276# CONFIG_CRYPTO_MANAGER is not set 1340# CONFIG_CRYPTO_MANAGER is not set
1277# CONFIG_CRYPTO_MANAGER2 is not set 1341# CONFIG_CRYPTO_MANAGER2 is not set
1278# CONFIG_CRYPTO_GF128MUL is not set 1342# CONFIG_CRYPTO_GF128MUL is not set
@@ -1304,11 +1368,13 @@ CONFIG_CRYPTO=y
1304# 1368#
1305# CONFIG_CRYPTO_HMAC is not set 1369# CONFIG_CRYPTO_HMAC is not set
1306# CONFIG_CRYPTO_XCBC is not set 1370# CONFIG_CRYPTO_XCBC is not set
1371# CONFIG_CRYPTO_VMAC is not set
1307 1372
1308# 1373#
1309# Digest 1374# Digest
1310# 1375#
1311# CONFIG_CRYPTO_CRC32C is not set 1376# CONFIG_CRYPTO_CRC32C is not set
1377# CONFIG_CRYPTO_GHASH is not set
1312# CONFIG_CRYPTO_MD4 is not set 1378# CONFIG_CRYPTO_MD4 is not set
1313# CONFIG_CRYPTO_MD5 is not set 1379# CONFIG_CRYPTO_MD5 is not set
1314# CONFIG_CRYPTO_MICHAEL_MIC is not set 1380# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1345,6 +1411,7 @@ CONFIG_CRYPTO=y
1345# Compression 1411# Compression
1346# 1412#
1347# CONFIG_CRYPTO_DEFLATE is not set 1413# CONFIG_CRYPTO_DEFLATE is not set
1414# CONFIG_CRYPTO_ZLIB is not set
1348# CONFIG_CRYPTO_LZO is not set 1415# CONFIG_CRYPTO_LZO is not set
1349 1416
1350# 1417#
@@ -1352,11 +1419,13 @@ CONFIG_CRYPTO=y
1352# 1419#
1353# CONFIG_CRYPTO_ANSI_CPRNG is not set 1420# CONFIG_CRYPTO_ANSI_CPRNG is not set
1354CONFIG_CRYPTO_HW=y 1421CONFIG_CRYPTO_HW=y
1422# CONFIG_BINARY_PRINTF is not set
1355 1423
1356# 1424#
1357# Library routines 1425# Library routines
1358# 1426#
1359CONFIG_BITREVERSE=y 1427CONFIG_BITREVERSE=y
1428CONFIG_GENERIC_FIND_LAST_BIT=y
1360CONFIG_CRC_CCITT=m 1429CONFIG_CRC_CCITT=m
1361# CONFIG_CRC16 is not set 1430# CONFIG_CRC16 is not set
1362# CONFIG_CRC_T10DIF is not set 1431# CONFIG_CRC_T10DIF is not set
@@ -1366,6 +1435,8 @@ CONFIG_CRC32=y
1366# CONFIG_LIBCRC32C is not set 1435# CONFIG_LIBCRC32C is not set
1367CONFIG_ZLIB_INFLATE=y 1436CONFIG_ZLIB_INFLATE=y
1368CONFIG_ZLIB_DEFLATE=m 1437CONFIG_ZLIB_DEFLATE=m
1438CONFIG_DECOMPRESS_GZIP=y
1369CONFIG_HAS_IOMEM=y 1439CONFIG_HAS_IOMEM=y
1370CONFIG_HAS_IOPORT=y 1440CONFIG_HAS_IOPORT=y
1371CONFIG_HAS_DMA=y 1441CONFIG_HAS_DMA=y
1442CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index eb3e98b6f3f0..ca309cfc6ac4 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -1,7 +1,6 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31.5 3# Linux kernel version: 2.6.32.2
4# Mon Nov 2 22:02:56 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
@@ -12,7 +11,6 @@ CONFIG_GENERIC_CSUM=y
12CONFIG_GENERIC_BUG=y 11CONFIG_GENERIC_BUG=y
13CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
14CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
15CONFIG_GENERIC_HWEIGHT=y
16CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
17CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
18CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
@@ -49,11 +47,12 @@ CONFIG_SYSVIPC_SYSCTL=y
49# 47#
50# RCU Subsystem 48# RCU Subsystem
51# 49#
52CONFIG_CLASSIC_RCU=y 50CONFIG_TREE_RCU=y
53# CONFIG_TREE_RCU is not set 51# CONFIG_TREE_PREEMPT_RCU is not set
54# CONFIG_PREEMPT_RCU is not set 52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set 55# CONFIG_TREE_RCU_TRACE is not set
56# CONFIG_PREEMPT_RCU_TRACE is not set
57CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
58CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
59CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
@@ -89,24 +88,23 @@ CONFIG_EPOLL=y
89# CONFIG_AIO is not set 88# CONFIG_AIO is not set
90 89
91# 90#
92# Performance Counters 91# Kernel Performance Events And Counters
93# 92#
94CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
95# CONFIG_STRIP_ASM_SYMS is not set
96CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
97CONFIG_SLAB=y 95CONFIG_SLAB=y
98# CONFIG_SLUB is not set 96# CONFIG_SLUB is not set
99# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
100CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
101# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
102# CONFIG_MARKERS is not set
103CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
104 101
105# 102#
106# GCOV-based kernel profiling 103# GCOV-based kernel profiling
107# 104#
108# CONFIG_GCOV_KERNEL is not set 105# CONFIG_GCOV_KERNEL is not set
109# CONFIG_SLOW_WORK is not set 106CONFIG_SLOW_WORK=y
107# CONFIG_SLOW_WORK_DEBUG is not set
110# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 108# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
111CONFIG_SLABINFO=y 109CONFIG_SLABINFO=y
112CONFIG_BASE_SMALL=0 110CONFIG_BASE_SMALL=0
@@ -163,15 +161,15 @@ CONFIG_PREEMPT_VOLUNTARY=y
163# CONFIG_BF537 is not set 161# CONFIG_BF537 is not set
164# CONFIG_BF538 is not set 162# CONFIG_BF538 is not set
165# CONFIG_BF539 is not set 163# CONFIG_BF539 is not set
166# CONFIG_BF542 is not set 164# CONFIG_BF542_std is not set
167# CONFIG_BF542M is not set 165# CONFIG_BF542M is not set
168# CONFIG_BF544 is not set 166# CONFIG_BF544_std is not set
169# CONFIG_BF544M is not set 167# CONFIG_BF544M is not set
170# CONFIG_BF547 is not set 168# CONFIG_BF547_std is not set
171# CONFIG_BF547M is not set 169# CONFIG_BF547M is not set
172CONFIG_BF548_std=y 170CONFIG_BF548_std=y
173# CONFIG_BF548M is not set 171# CONFIG_BF548M is not set
174# CONFIG_BF549 is not set 172# CONFIG_BF549_std is not set
175# CONFIG_BF549M is not set 173# CONFIG_BF549M is not set
176# CONFIG_BF561 is not set 174# CONFIG_BF561 is not set
177CONFIG_BF_REV_MIN=0 175CONFIG_BF_REV_MIN=0
@@ -185,7 +183,6 @@ CONFIG_BF_REV_0_2=y
185# CONFIG_BF_REV_0_6 is not set 183# CONFIG_BF_REV_0_6 is not set
186# CONFIG_BF_REV_ANY is not set 184# CONFIG_BF_REV_ANY is not set
187# CONFIG_BF_REV_NONE is not set 185# CONFIG_BF_REV_NONE is not set
188CONFIG_BF54x=y
189CONFIG_IRQ_PLL_WAKEUP=7 186CONFIG_IRQ_PLL_WAKEUP=7
190CONFIG_IRQ_RTC=8 187CONFIG_IRQ_RTC=8
191CONFIG_IRQ_SPORT0_RX=9 188CONFIG_IRQ_SPORT0_RX=9
@@ -221,6 +218,8 @@ CONFIG_IRQ_SPI1=10
221CONFIG_IRQ_SPI2=10 218CONFIG_IRQ_SPI2=10
222CONFIG_IRQ_TWI0=11 219CONFIG_IRQ_TWI0=11
223CONFIG_IRQ_TWI1=11 220CONFIG_IRQ_TWI1=11
221CONFIG_BF548=y
222CONFIG_BF54x=y
224CONFIG_BFIN548_EZKIT=y 223CONFIG_BFIN548_EZKIT=y
225# CONFIG_BFIN548_BLUETECHNIX_CM is not set 224# CONFIG_BFIN548_BLUETECHNIX_CM is not set
226 225
@@ -387,12 +386,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
387# CONFIG_PHYS_ADDR_T_64BIT is not set 386# CONFIG_PHYS_ADDR_T_64BIT is not set
388CONFIG_ZONE_DMA_FLAG=1 387CONFIG_ZONE_DMA_FLAG=1
389CONFIG_VIRT_TO_BUS=y 388CONFIG_VIRT_TO_BUS=y
390CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
391CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 389CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
392CONFIG_BFIN_GPTIMERS=m 390CONFIG_BFIN_GPTIMERS=m
393# CONFIG_DMA_UNCACHED_4M is not set 391# CONFIG_DMA_UNCACHED_4M is not set
394CONFIG_DMA_UNCACHED_2M=y 392CONFIG_DMA_UNCACHED_2M=y
395# CONFIG_DMA_UNCACHED_1M is not set 393# CONFIG_DMA_UNCACHED_1M is not set
394# CONFIG_DMA_UNCACHED_512K is not set
395# CONFIG_DMA_UNCACHED_256K is not set
396# CONFIG_DMA_UNCACHED_128K is not set
396# CONFIG_DMA_UNCACHED_NONE is not set 397# CONFIG_DMA_UNCACHED_NONE is not set
397 398
398# 399#
@@ -505,6 +506,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
505# CONFIG_NETFILTER is not set 506# CONFIG_NETFILTER is not set
506# CONFIG_IP_DCCP is not set 507# CONFIG_IP_DCCP is not set
507# CONFIG_IP_SCTP is not set 508# CONFIG_IP_SCTP is not set
509# CONFIG_RDS is not set
508# CONFIG_TIPC is not set 510# CONFIG_TIPC is not set
509# CONFIG_ATM is not set 511# CONFIG_ATM is not set
510# CONFIG_BRIDGE is not set 512# CONFIG_BRIDGE is not set
@@ -528,7 +530,24 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
528# 530#
529# CONFIG_NET_PKTGEN is not set 531# CONFIG_NET_PKTGEN is not set
530# CONFIG_HAMRADIO is not set 532# CONFIG_HAMRADIO is not set
531# CONFIG_CAN is not set 533CONFIG_CAN=m
534CONFIG_CAN_RAW=m
535CONFIG_CAN_BCM=m
536
537#
538# CAN Device Drivers
539#
540# CONFIG_CAN_VCAN is not set
541CONFIG_CAN_DEV=m
542# CONFIG_CAN_CALC_BITTIMING is not set
543CONFIG_CAN_BFIN=m
544# CONFIG_CAN_SJA1000 is not set
545
546#
547# CAN USB interfaces
548#
549# CONFIG_CAN_EMS_USB is not set
550# CONFIG_CAN_DEBUG_DEVICES is not set
532CONFIG_IRDA=m 551CONFIG_IRDA=m
533 552
534# 553#
@@ -663,6 +682,7 @@ CONFIG_MTD_PHYSMAP=y
663# CONFIG_MTD_DATAFLASH is not set 682# CONFIG_MTD_DATAFLASH is not set
664CONFIG_MTD_M25P80=y 683CONFIG_MTD_M25P80=y
665CONFIG_M25PXX_USE_FAST_READ=y 684CONFIG_M25PXX_USE_FAST_READ=y
685# CONFIG_MTD_SST25L is not set
666# CONFIG_MTD_SLRAM is not set 686# CONFIG_MTD_SLRAM is not set
667# CONFIG_MTD_PHRAM is not set 687# CONFIG_MTD_PHRAM is not set
668# CONFIG_MTD_MTDRAM is not set 688# CONFIG_MTD_MTDRAM is not set
@@ -711,10 +731,10 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
711# CONFIG_ATA_OVER_ETH is not set 731# CONFIG_ATA_OVER_ETH is not set
712# CONFIG_BLK_DEV_HD is not set 732# CONFIG_BLK_DEV_HD is not set
713CONFIG_MISC_DEVICES=y 733CONFIG_MISC_DEVICES=y
734# CONFIG_AD525X_DPOT is not set
714# CONFIG_ICS932S401 is not set 735# CONFIG_ICS932S401 is not set
715# CONFIG_ENCLOSURE_SERVICES is not set 736# CONFIG_ENCLOSURE_SERVICES is not set
716# CONFIG_ISL29003 is not set 737# CONFIG_ISL29003 is not set
717# CONFIG_AD525X_DPOT is not set
718# CONFIG_C2PORT is not set 738# CONFIG_C2PORT is not set
719 739
720# 740#
@@ -767,7 +787,8 @@ CONFIG_SCSI_WAIT_SCAN=m
767# CONFIG_SCSI_OSD_INITIATOR is not set 787# CONFIG_SCSI_OSD_INITIATOR is not set
768CONFIG_ATA=y 788CONFIG_ATA=y
769# CONFIG_ATA_NONSTANDARD is not set 789# CONFIG_ATA_NONSTANDARD is not set
770CONFIG_SATA_PMP=y 790CONFIG_ATA_VERBOSE_ERROR=y
791# CONFIG_SATA_PMP is not set
771CONFIG_ATA_SFF=y 792CONFIG_ATA_SFF=y
772# CONFIG_SATA_MV is not set 793# CONFIG_SATA_MV is not set
773# CONFIG_PATA_PLATFORM is not set 794# CONFIG_PATA_PLATFORM is not set
@@ -808,6 +829,7 @@ CONFIG_MII=y
808# CONFIG_ETHOC is not set 829# CONFIG_ETHOC is not set
809CONFIG_SMSC911X=y 830CONFIG_SMSC911X=y
810# CONFIG_DNET is not set 831# CONFIG_DNET is not set
832# CONFIG_ADF702X is not set
811# CONFIG_IBM_NEW_EMAC_ZMII is not set 833# CONFIG_IBM_NEW_EMAC_ZMII is not set
812# CONFIG_IBM_NEW_EMAC_RGMII is not set 834# CONFIG_IBM_NEW_EMAC_RGMII is not set
813# CONFIG_IBM_NEW_EMAC_TAH is not set 835# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -818,12 +840,10 @@ CONFIG_SMSC911X=y
818# CONFIG_B44 is not set 840# CONFIG_B44 is not set
819# CONFIG_KS8842 is not set 841# CONFIG_KS8842 is not set
820# CONFIG_KS8851 is not set 842# CONFIG_KS8851 is not set
843# CONFIG_KS8851_MLL is not set
821# CONFIG_NETDEV_1000 is not set 844# CONFIG_NETDEV_1000 is not set
822# CONFIG_NETDEV_10000 is not set 845# CONFIG_NETDEV_10000 is not set
823 846CONFIG_WLAN=y
824#
825# Wireless LAN
826#
827# CONFIG_WLAN_PRE80211 is not set 847# CONFIG_WLAN_PRE80211 is not set
828CONFIG_WLAN_80211=y 848CONFIG_WLAN_80211=y
829CONFIG_LIBERTAS=m 849CONFIG_LIBERTAS=m
@@ -877,10 +897,12 @@ CONFIG_INPUT_EVBUG=m
877CONFIG_INPUT_KEYBOARD=y 897CONFIG_INPUT_KEYBOARD=y
878# CONFIG_KEYBOARD_ADP5588 is not set 898# CONFIG_KEYBOARD_ADP5588 is not set
879# CONFIG_KEYBOARD_ATKBD is not set 899# CONFIG_KEYBOARD_ATKBD is not set
900# CONFIG_QT2160 is not set
880CONFIG_KEYBOARD_BFIN=y 901CONFIG_KEYBOARD_BFIN=y
881# CONFIG_KEYBOARD_LKKBD is not set 902# CONFIG_KEYBOARD_LKKBD is not set
882# CONFIG_KEYBOARD_GPIO is not set 903# CONFIG_KEYBOARD_GPIO is not set
883# CONFIG_KEYBOARD_MATRIX is not set 904# CONFIG_KEYBOARD_MATRIX is not set
905# CONFIG_KEYBOARD_MAX7359 is not set
884# CONFIG_KEYBOARD_NEWTON is not set 906# CONFIG_KEYBOARD_NEWTON is not set
885# CONFIG_KEYBOARD_OPENCORES is not set 907# CONFIG_KEYBOARD_OPENCORES is not set
886# CONFIG_KEYBOARD_STOWAWAY is not set 908# CONFIG_KEYBOARD_STOWAWAY is not set
@@ -900,6 +922,7 @@ CONFIG_TOUCHSCREEN_AD7877=m
900# CONFIG_TOUCHSCREEN_GUNZE is not set 922# CONFIG_TOUCHSCREEN_GUNZE is not set
901# CONFIG_TOUCHSCREEN_ELO is not set 923# CONFIG_TOUCHSCREEN_ELO is not set
902# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set 924# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
925# CONFIG_TOUCHSCREEN_MCS5000 is not set
903# CONFIG_TOUCHSCREEN_MTOUCH is not set 926# CONFIG_TOUCHSCREEN_MTOUCH is not set
904# CONFIG_TOUCHSCREEN_INEXIO is not set 927# CONFIG_TOUCHSCREEN_INEXIO is not set
905# CONFIG_TOUCHSCREEN_MK712 is not set 928# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -910,7 +933,6 @@ CONFIG_TOUCHSCREEN_AD7877=m
910# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set 933# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
911# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set 934# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
912# CONFIG_TOUCHSCREEN_TSC2007 is not set 935# CONFIG_TOUCHSCREEN_TSC2007 is not set
913# CONFIG_TOUCHSCREEN_W90X900 is not set
914CONFIG_INPUT_MISC=y 936CONFIG_INPUT_MISC=y
915# CONFIG_INPUT_ATI_REMOTE is not set 937# CONFIG_INPUT_ATI_REMOTE is not set
916# CONFIG_INPUT_ATI_REMOTE2 is not set 938# CONFIG_INPUT_ATI_REMOTE2 is not set
@@ -976,11 +998,6 @@ CONFIG_UNIX98_PTYS=y
976# CONFIG_LEGACY_PTYS is not set 998# CONFIG_LEGACY_PTYS is not set
977CONFIG_BFIN_OTP=y 999CONFIG_BFIN_OTP=y
978# CONFIG_BFIN_OTP_WRITE_ENABLE is not set 1000# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
979
980#
981# CAN, the car bus and industrial fieldbus
982#
983# CONFIG_CAN4LINUX is not set
984# CONFIG_IPMI_HANDLER is not set 1001# CONFIG_IPMI_HANDLER is not set
985# CONFIG_HW_RANDOM is not set 1002# CONFIG_HW_RANDOM is not set
986# CONFIG_R3964 is not set 1003# CONFIG_R3964 is not set
@@ -988,6 +1005,7 @@ CONFIG_BFIN_OTP=y
988# CONFIG_TCG_TPM is not set 1005# CONFIG_TCG_TPM is not set
989CONFIG_I2C=y 1006CONFIG_I2C=y
990CONFIG_I2C_BOARDINFO=y 1007CONFIG_I2C_BOARDINFO=y
1008CONFIG_I2C_COMPAT=y
991CONFIG_I2C_CHARDEV=y 1009CONFIG_I2C_CHARDEV=y
992CONFIG_I2C_HELPER_AUTO=y 1010CONFIG_I2C_HELPER_AUTO=y
993 1011
@@ -1021,9 +1039,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
1021# Miscellaneous I2C Chip support 1039# Miscellaneous I2C Chip support
1022# 1040#
1023# CONFIG_DS1682 is not set 1041# CONFIG_DS1682 is not set
1024# CONFIG_SENSORS_PCF8574 is not set
1025# CONFIG_PCF8575 is not set
1026# CONFIG_SENSORS_PCA9539 is not set
1027# CONFIG_SENSORS_TSL2550 is not set 1042# CONFIG_SENSORS_TSL2550 is not set
1028# CONFIG_I2C_DEBUG_CORE is not set 1043# CONFIG_I2C_DEBUG_CORE is not set
1029# CONFIG_I2C_DEBUG_ALGO is not set 1044# CONFIG_I2C_DEBUG_ALGO is not set
@@ -1078,11 +1093,15 @@ CONFIG_GPIO_SYSFS=y
1078# 1093#
1079# CONFIG_GPIO_MAX7301 is not set 1094# CONFIG_GPIO_MAX7301 is not set
1080# CONFIG_GPIO_MCP23S08 is not set 1095# CONFIG_GPIO_MCP23S08 is not set
1096# CONFIG_GPIO_MC33880 is not set
1097
1098#
1099# AC97 GPIO expanders:
1100#
1081# CONFIG_W1 is not set 1101# CONFIG_W1 is not set
1082# CONFIG_POWER_SUPPLY is not set 1102# CONFIG_POWER_SUPPLY is not set
1083# CONFIG_HWMON is not set 1103# CONFIG_HWMON is not set
1084# CONFIG_THERMAL is not set 1104# CONFIG_THERMAL is not set
1085# CONFIG_THERMAL_HWMON is not set
1086CONFIG_WATCHDOG=y 1105CONFIG_WATCHDOG=y
1087# CONFIG_WATCHDOG_NOWAYOUT is not set 1106# CONFIG_WATCHDOG_NOWAYOUT is not set
1088 1107
@@ -1116,8 +1135,10 @@ CONFIG_SSB_POSSIBLE=y
1116# CONFIG_PMIC_DA903X is not set 1135# CONFIG_PMIC_DA903X is not set
1117# CONFIG_PMIC_ADP5520 is not set 1136# CONFIG_PMIC_ADP5520 is not set
1118# CONFIG_MFD_WM8400 is not set 1137# CONFIG_MFD_WM8400 is not set
1138# CONFIG_MFD_WM831X is not set
1119# CONFIG_MFD_WM8350_I2C is not set 1139# CONFIG_MFD_WM8350_I2C is not set
1120# CONFIG_MFD_PCF50633 is not set 1140# CONFIG_MFD_PCF50633 is not set
1141# CONFIG_MFD_MC13783 is not set
1121# CONFIG_AB3100_CORE is not set 1142# CONFIG_AB3100_CORE is not set
1122# CONFIG_EZX_PCAP is not set 1143# CONFIG_EZX_PCAP is not set
1123# CONFIG_REGULATOR is not set 1144# CONFIG_REGULATOR is not set
@@ -1192,6 +1213,7 @@ CONFIG_LOGO=y
1192CONFIG_LOGO_BLACKFIN_CLUT224=y 1213CONFIG_LOGO_BLACKFIN_CLUT224=y
1193CONFIG_SOUND=y 1214CONFIG_SOUND=y
1194CONFIG_SOUND_OSS_CORE=y 1215CONFIG_SOUND_OSS_CORE=y
1216CONFIG_SOUND_OSS_CORE_PRECLAIM=y
1195CONFIG_SND=y 1217CONFIG_SND=y
1196CONFIG_SND_TIMER=y 1218CONFIG_SND_TIMER=y
1197CONFIG_SND_PCM=y 1219CONFIG_SND_PCM=y
@@ -1245,7 +1267,6 @@ CONFIG_SND_SOC_AD1980=y
1245CONFIG_AC97_BUS=y 1267CONFIG_AC97_BUS=y
1246CONFIG_HID_SUPPORT=y 1268CONFIG_HID_SUPPORT=y
1247CONFIG_HID=y 1269CONFIG_HID=y
1248# CONFIG_HID_DEBUG is not set
1249# CONFIG_HIDRAW is not set 1270# CONFIG_HIDRAW is not set
1250 1271
1251# 1272#
@@ -1268,6 +1289,7 @@ CONFIG_HID_CYPRESS=y
1268CONFIG_HID_EZKEY=y 1289CONFIG_HID_EZKEY=y
1269# CONFIG_HID_KYE is not set 1290# CONFIG_HID_KYE is not set
1270CONFIG_HID_GYRATION=y 1291CONFIG_HID_GYRATION=y
1292# CONFIG_HID_TWINHAN is not set
1271# CONFIG_HID_KENSINGTON is not set 1293# CONFIG_HID_KENSINGTON is not set
1272CONFIG_HID_LOGITECH=y 1294CONFIG_HID_LOGITECH=y
1273# CONFIG_LOGITECH_FF is not set 1295# CONFIG_LOGITECH_FF is not set
@@ -1422,10 +1444,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y
1422# MMC/SD/SDIO Host Controller Drivers 1444# MMC/SD/SDIO Host Controller Drivers
1423# 1445#
1424# CONFIG_MMC_SDHCI is not set 1446# CONFIG_MMC_SDHCI is not set
1447# CONFIG_MMC_AT91 is not set
1448# CONFIG_MMC_ATMELMCI is not set
1449# CONFIG_MMC_SPI is not set
1425CONFIG_SDH_BFIN=y 1450CONFIG_SDH_BFIN=y
1426# CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND is not set 1451# CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND is not set
1427# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
1428# CONFIG_MMC_SPI is not set
1429# CONFIG_MEMSTICK is not set 1452# CONFIG_MEMSTICK is not set
1430# CONFIG_NEW_LEDS is not set 1453# CONFIG_NEW_LEDS is not set
1431# CONFIG_ACCESSIBILITY is not set 1454# CONFIG_ACCESSIBILITY is not set
@@ -1472,6 +1495,7 @@ CONFIG_RTC_INTF_DEV=y
1472# CONFIG_RTC_DRV_R9701 is not set 1495# CONFIG_RTC_DRV_R9701 is not set
1473# CONFIG_RTC_DRV_RS5C348 is not set 1496# CONFIG_RTC_DRV_RS5C348 is not set
1474# CONFIG_RTC_DRV_DS3234 is not set 1497# CONFIG_RTC_DRV_DS3234 is not set
1498# CONFIG_RTC_DRV_PCF2123 is not set
1475 1499
1476# 1500#
1477# Platform RTC drivers 1501# Platform RTC drivers
@@ -1522,6 +1546,7 @@ CONFIG_FS_MBCACHE=y
1522# CONFIG_XFS_FS is not set 1546# CONFIG_XFS_FS is not set
1523# CONFIG_OCFS2_FS is not set 1547# CONFIG_OCFS2_FS is not set
1524# CONFIG_BTRFS_FS is not set 1548# CONFIG_BTRFS_FS is not set
1549# CONFIG_NILFS2_FS is not set
1525CONFIG_FILE_LOCKING=y 1550CONFIG_FILE_LOCKING=y
1526CONFIG_FSNOTIFY=y 1551CONFIG_FSNOTIFY=y
1527# CONFIG_DNOTIFY is not set 1552# CONFIG_DNOTIFY is not set
@@ -1563,7 +1588,6 @@ CONFIG_NTFS_RW=y
1563CONFIG_PROC_FS=y 1588CONFIG_PROC_FS=y
1564CONFIG_PROC_SYSCTL=y 1589CONFIG_PROC_SYSCTL=y
1565CONFIG_SYSFS=y 1590CONFIG_SYSFS=y
1566# CONFIG_TMPFS is not set
1567# CONFIG_HUGETLB_PAGE is not set 1591# CONFIG_HUGETLB_PAGE is not set
1568# CONFIG_CONFIGFS_FS is not set 1592# CONFIG_CONFIGFS_FS is not set
1569CONFIG_MISC_FILESYSTEMS=y 1593CONFIG_MISC_FILESYSTEMS=y
@@ -1595,7 +1619,6 @@ CONFIG_JFFS2_RTIME=y
1595# CONFIG_ROMFS_FS is not set 1619# CONFIG_ROMFS_FS is not set
1596# CONFIG_SYSV_FS is not set 1620# CONFIG_SYSV_FS is not set
1597# CONFIG_UFS_FS is not set 1621# CONFIG_UFS_FS is not set
1598# CONFIG_NILFS2_FS is not set
1599CONFIG_NETWORK_FILESYSTEMS=y 1622CONFIG_NETWORK_FILESYSTEMS=y
1600CONFIG_NFS_FS=m 1623CONFIG_NFS_FS=m
1601CONFIG_NFS_V3=y 1624CONFIG_NFS_V3=y
@@ -1680,6 +1703,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1680CONFIG_ENABLE_MUST_CHECK=y 1703CONFIG_ENABLE_MUST_CHECK=y
1681CONFIG_FRAME_WARN=1024 1704CONFIG_FRAME_WARN=1024
1682# CONFIG_MAGIC_SYSRQ is not set 1705# CONFIG_MAGIC_SYSRQ is not set
1706# CONFIG_STRIP_ASM_SYMS is not set
1683# CONFIG_UNUSED_SYMBOLS is not set 1707# CONFIG_UNUSED_SYMBOLS is not set
1684CONFIG_DEBUG_FS=y 1708CONFIG_DEBUG_FS=y
1685# CONFIG_HEADERS_CHECK is not set 1709# CONFIG_HEADERS_CHECK is not set
@@ -1714,12 +1738,14 @@ CONFIG_DEBUG_INFO=y
1714# CONFIG_DEBUG_LIST is not set 1738# CONFIG_DEBUG_LIST is not set
1715# CONFIG_DEBUG_SG is not set 1739# CONFIG_DEBUG_SG is not set
1716# CONFIG_DEBUG_NOTIFIERS is not set 1740# CONFIG_DEBUG_NOTIFIERS is not set
1741# CONFIG_DEBUG_CREDENTIALS is not set
1717# CONFIG_FRAME_POINTER is not set 1742# CONFIG_FRAME_POINTER is not set
1718# CONFIG_BOOT_PRINTK_DELAY is not set 1743# CONFIG_BOOT_PRINTK_DELAY is not set
1719# CONFIG_RCU_TORTURE_TEST is not set 1744# CONFIG_RCU_TORTURE_TEST is not set
1720# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1745# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1721# CONFIG_BACKTRACE_SELF_TEST is not set 1746# CONFIG_BACKTRACE_SELF_TEST is not set
1722# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1747# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1748# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1723# CONFIG_FAULT_INJECTION is not set 1749# CONFIG_FAULT_INJECTION is not set
1724# CONFIG_PAGE_POISONING is not set 1750# CONFIG_PAGE_POISONING is not set
1725CONFIG_HAVE_FUNCTION_TRACER=y 1751CONFIG_HAVE_FUNCTION_TRACER=y
@@ -1730,7 +1756,6 @@ CONFIG_TRACING_SUPPORT=y
1730# CONFIG_SAMPLES is not set 1756# CONFIG_SAMPLES is not set
1731CONFIG_HAVE_ARCH_KGDB=y 1757CONFIG_HAVE_ARCH_KGDB=y
1732# CONFIG_KGDB is not set 1758# CONFIG_KGDB is not set
1733# CONFIG_KMEMCHECK is not set
1734# CONFIG_DEBUG_STACKOVERFLOW is not set 1759# CONFIG_DEBUG_STACKOVERFLOW is not set
1735# CONFIG_DEBUG_STACK_USAGE is not set 1760# CONFIG_DEBUG_STACK_USAGE is not set
1736CONFIG_DEBUG_VERBOSE=y 1761CONFIG_DEBUG_VERBOSE=y
@@ -1766,7 +1791,6 @@ CONFIG_CRYPTO=y
1766# 1791#
1767# Crypto core or helper 1792# Crypto core or helper
1768# 1793#
1769# CONFIG_CRYPTO_FIPS is not set
1770# CONFIG_CRYPTO_MANAGER is not set 1794# CONFIG_CRYPTO_MANAGER is not set
1771# CONFIG_CRYPTO_MANAGER2 is not set 1795# CONFIG_CRYPTO_MANAGER2 is not set
1772# CONFIG_CRYPTO_GF128MUL is not set 1796# CONFIG_CRYPTO_GF128MUL is not set
@@ -1798,11 +1822,13 @@ CONFIG_CRYPTO=y
1798# 1822#
1799# CONFIG_CRYPTO_HMAC is not set 1823# CONFIG_CRYPTO_HMAC is not set
1800# CONFIG_CRYPTO_XCBC is not set 1824# CONFIG_CRYPTO_XCBC is not set
1825# CONFIG_CRYPTO_VMAC is not set
1801 1826
1802# 1827#
1803# Digest 1828# Digest
1804# 1829#
1805# CONFIG_CRYPTO_CRC32C is not set 1830# CONFIG_CRYPTO_CRC32C is not set
1831# CONFIG_CRYPTO_GHASH is not set
1806# CONFIG_CRYPTO_MD4 is not set 1832# CONFIG_CRYPTO_MD4 is not set
1807# CONFIG_CRYPTO_MD5 is not set 1833# CONFIG_CRYPTO_MD5 is not set
1808# CONFIG_CRYPTO_MICHAEL_MIC is not set 1834# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index b9b0f93d0bd3..6a776ce75e9c 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -114,7 +114,7 @@ CONFIG_MODULE_UNLOAD=y
114# CONFIG_MODVERSIONS is not set 114# CONFIG_MODVERSIONS is not set
115# CONFIG_MODULE_SRCVERSION_ALL is not set 115# CONFIG_MODULE_SRCVERSION_ALL is not set
116CONFIG_BLOCK=y 116CONFIG_BLOCK=y
117CONFIG_LBDAF=y 117# CONFIG_LBDAF is not set
118# CONFIG_BLK_DEV_BSG is not set 118# CONFIG_BLK_DEV_BSG is not set
119# CONFIG_BLK_DEV_INTEGRITY is not set 119# CONFIG_BLK_DEV_INTEGRITY is not set
120 120
@@ -1486,19 +1486,10 @@ CONFIG_DEBUG_INFO=y
1486CONFIG_HAVE_FUNCTION_TRACER=y 1486CONFIG_HAVE_FUNCTION_TRACER=y
1487CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y 1487CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1488CONFIG_TRACING_SUPPORT=y 1488CONFIG_TRACING_SUPPORT=y
1489CONFIG_FTRACE=y 1489# CONFIG_FTRACE is not set
1490# CONFIG_FUNCTION_TRACER is not set 1490# CONFIG_BRANCH_PROFILE_NONE is not set
1491# CONFIG_IRQSOFF_TRACER is not set
1492# CONFIG_SCHED_TRACER is not set
1493# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1494# CONFIG_BOOT_TRACER is not set
1495CONFIG_BRANCH_PROFILE_NONE=y
1496# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set 1491# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1497# CONFIG_PROFILE_ALL_BRANCHES is not set 1492# CONFIG_PROFILE_ALL_BRANCHES is not set
1498# CONFIG_STACK_TRACER is not set
1499# CONFIG_KMEMTRACE is not set
1500# CONFIG_WORKQUEUE_TRACER is not set
1501# CONFIG_BLK_DEV_IO_TRACE is not set
1502# CONFIG_DYNAMIC_DEBUG is not set 1493# CONFIG_DYNAMIC_DEBUG is not set
1503# CONFIG_SAMPLES is not set 1494# CONFIG_SAMPLES is not set
1504CONFIG_HAVE_ARCH_KGDB=y 1495CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index e3ecdcc3e76b..792ff0938835 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -1,7 +1,6 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31.5 3# Linux kernel version: 2.6.32.2
4# Mon Nov 2 21:59:31 2009
5# 4#
6# CONFIG_MMU is not set 5# CONFIG_MMU is not set
7# CONFIG_FPU is not set 6# CONFIG_FPU is not set
@@ -12,7 +11,6 @@ CONFIG_GENERIC_CSUM=y
12CONFIG_GENERIC_BUG=y 11CONFIG_GENERIC_BUG=y
13CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
14CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
15CONFIG_GENERIC_HWEIGHT=y
16CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
17CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
18CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
@@ -49,11 +47,12 @@ CONFIG_SYSVIPC_SYSCTL=y
49# 47#
50# RCU Subsystem 48# RCU Subsystem
51# 49#
52CONFIG_CLASSIC_RCU=y 50CONFIG_TREE_RCU=y
53# CONFIG_TREE_RCU is not set 51# CONFIG_TREE_PREEMPT_RCU is not set
54# CONFIG_PREEMPT_RCU is not set 52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set 55# CONFIG_TREE_RCU_TRACE is not set
56# CONFIG_PREEMPT_RCU_TRACE is not set
57CONFIG_IKCONFIG=y 56CONFIG_IKCONFIG=y
58CONFIG_IKCONFIG_PROC=y 57CONFIG_IKCONFIG_PROC=y
59CONFIG_LOG_BUF_SHIFT=14 58CONFIG_LOG_BUF_SHIFT=14
@@ -89,17 +88,15 @@ CONFIG_EPOLL=y
89# CONFIG_AIO is not set 88# CONFIG_AIO is not set
90 89
91# 90#
92# Performance Counters 91# Kernel Performance Events And Counters
93# 92#
94CONFIG_VM_EVENT_COUNTERS=y 93CONFIG_VM_EVENT_COUNTERS=y
95# CONFIG_STRIP_ASM_SYMS is not set
96CONFIG_COMPAT_BRK=y 94CONFIG_COMPAT_BRK=y
97CONFIG_SLAB=y 95CONFIG_SLAB=y
98# CONFIG_SLUB is not set 96# CONFIG_SLUB is not set
99# CONFIG_SLOB is not set 97# CONFIG_SLOB is not set
100CONFIG_MMAP_ALLOW_UNINITIALIZED=y 98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
101# CONFIG_PROFILING is not set 99# CONFIG_PROFILING is not set
102# CONFIG_MARKERS is not set
103CONFIG_HAVE_OPROFILE=y 100CONFIG_HAVE_OPROFILE=y
104 101
105# 102#
@@ -163,15 +160,15 @@ CONFIG_PREEMPT_VOLUNTARY=y
163# CONFIG_BF537 is not set 160# CONFIG_BF537 is not set
164# CONFIG_BF538 is not set 161# CONFIG_BF538 is not set
165# CONFIG_BF539 is not set 162# CONFIG_BF539 is not set
166# CONFIG_BF542 is not set 163# CONFIG_BF542_std is not set
167# CONFIG_BF542M is not set 164# CONFIG_BF542M is not set
168# CONFIG_BF544 is not set 165# CONFIG_BF544_std is not set
169# CONFIG_BF544M is not set 166# CONFIG_BF544M is not set
170# CONFIG_BF547 is not set 167# CONFIG_BF547_std is not set
171# CONFIG_BF547M is not set 168# CONFIG_BF547M is not set
172# CONFIG_BF548 is not set 169# CONFIG_BF548_std is not set
173# CONFIG_BF548M is not set 170# CONFIG_BF548M is not set
174# CONFIG_BF549 is not set 171# CONFIG_BF549_std is not set
175# CONFIG_BF549M is not set 172# CONFIG_BF549M is not set
176CONFIG_BF561=y 173CONFIG_BF561=y
177# CONFIG_SMP is not set 174# CONFIG_SMP is not set
@@ -180,9 +177,9 @@ CONFIG_BF_REV_MAX=5
180# CONFIG_BF_REV_0_0 is not set 177# CONFIG_BF_REV_0_0 is not set
181# CONFIG_BF_REV_0_1 is not set 178# CONFIG_BF_REV_0_1 is not set
182# CONFIG_BF_REV_0_2 is not set 179# CONFIG_BF_REV_0_2 is not set
183# CONFIG_BF_REV_0_3 is not set 180CONFIG_BF_REV_0_3=y
184# CONFIG_BF_REV_0_4 is not set 181# CONFIG_BF_REV_0_4 is not set
185CONFIG_BF_REV_0_5=y 182# CONFIG_BF_REV_0_5 is not set
186# CONFIG_BF_REV_0_6 is not set 183# CONFIG_BF_REV_0_6 is not set
187# CONFIG_BF_REV_ANY is not set 184# CONFIG_BF_REV_ANY is not set
188# CONFIG_BF_REV_NONE is not set 185# CONFIG_BF_REV_NONE is not set
@@ -298,7 +295,7 @@ CONFIG_GENERIC_TIME=y
298CONFIG_GENERIC_CLOCKEVENTS=y 295CONFIG_GENERIC_CLOCKEVENTS=y
299# CONFIG_TICKSOURCE_GPTMR0 is not set 296# CONFIG_TICKSOURCE_GPTMR0 is not set
300CONFIG_TICKSOURCE_CORETMR=y 297CONFIG_TICKSOURCE_CORETMR=y
301# CONFIG_CYCLES_CLOCKSOURCE is not set 298CONFIG_CYCLES_CLOCKSOURCE=y
302# CONFIG_GPTMR0_CLOCKSOURCE is not set 299# CONFIG_GPTMR0_CLOCKSOURCE is not set
303CONFIG_TICK_ONESHOT=y 300CONFIG_TICK_ONESHOT=y
304# CONFIG_NO_HZ is not set 301# CONFIG_NO_HZ is not set
@@ -353,12 +350,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
353# CONFIG_PHYS_ADDR_T_64BIT is not set 350# CONFIG_PHYS_ADDR_T_64BIT is not set
354CONFIG_ZONE_DMA_FLAG=1 351CONFIG_ZONE_DMA_FLAG=1
355CONFIG_VIRT_TO_BUS=y 352CONFIG_VIRT_TO_BUS=y
356CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
357CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 353CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
358CONFIG_BFIN_GPTIMERS=m 354CONFIG_BFIN_GPTIMERS=m
359# CONFIG_DMA_UNCACHED_4M is not set 355# CONFIG_DMA_UNCACHED_4M is not set
360# CONFIG_DMA_UNCACHED_2M is not set 356# CONFIG_DMA_UNCACHED_2M is not set
361CONFIG_DMA_UNCACHED_1M=y 357CONFIG_DMA_UNCACHED_1M=y
358# CONFIG_DMA_UNCACHED_512K is not set
359# CONFIG_DMA_UNCACHED_256K is not set
360# CONFIG_DMA_UNCACHED_128K is not set
362# CONFIG_DMA_UNCACHED_NONE is not set 361# CONFIG_DMA_UNCACHED_NONE is not set
363 362
364# 363#
@@ -370,9 +369,11 @@ CONFIG_BFIN_EXTMEM_ICACHEABLE=y
370CONFIG_BFIN_DCACHE=y 369CONFIG_BFIN_DCACHE=y
371# CONFIG_BFIN_DCACHE_BANKA is not set 370# CONFIG_BFIN_DCACHE_BANKA is not set
372CONFIG_BFIN_EXTMEM_DCACHEABLE=y 371CONFIG_BFIN_EXTMEM_DCACHEABLE=y
373CONFIG_BFIN_EXTMEM_WRITEBACK=y 372# CONFIG_BFIN_EXTMEM_WRITEBACK is not set
374# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 373CONFIG_BFIN_EXTMEM_WRITETHROUGH=y
375# CONFIG_BFIN_L2_DCACHEABLE is not set 374CONFIG_BFIN_L2_DCACHEABLE=y
375# CONFIG_BFIN_L2_WRITEBACK is not set
376CONFIG_BFIN_L2_WRITETHROUGH=y
376 377
377# 378#
378# Memory Protection Unit 379# Memory Protection Unit
@@ -472,6 +473,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
472# CONFIG_NETFILTER is not set 473# CONFIG_NETFILTER is not set
473# CONFIG_IP_DCCP is not set 474# CONFIG_IP_DCCP is not set
474# CONFIG_IP_SCTP is not set 475# CONFIG_IP_SCTP is not set
476# CONFIG_RDS is not set
475# CONFIG_TIPC is not set 477# CONFIG_TIPC is not set
476# CONFIG_ATM is not set 478# CONFIG_ATM is not set
477# CONFIG_BRIDGE is not set 479# CONFIG_BRIDGE is not set
@@ -613,6 +615,7 @@ CONFIG_MTD_PHYSMAP=m
613# 615#
614# CONFIG_MTD_DATAFLASH is not set 616# CONFIG_MTD_DATAFLASH is not set
615# CONFIG_MTD_M25P80 is not set 617# CONFIG_MTD_M25P80 is not set
618# CONFIG_MTD_SST25L is not set
616# CONFIG_MTD_SLRAM is not set 619# CONFIG_MTD_SLRAM is not set
617# CONFIG_MTD_PHRAM is not set 620# CONFIG_MTD_PHRAM is not set
618# CONFIG_MTD_MTDRAM is not set 621# CONFIG_MTD_MTDRAM is not set
@@ -685,6 +688,7 @@ CONFIG_SMC91X=y
685# CONFIG_ETHOC is not set 688# CONFIG_ETHOC is not set
686# CONFIG_SMSC911X is not set 689# CONFIG_SMSC911X is not set
687# CONFIG_DNET is not set 690# CONFIG_DNET is not set
691# CONFIG_ADF702X is not set
688# CONFIG_IBM_NEW_EMAC_ZMII is not set 692# CONFIG_IBM_NEW_EMAC_ZMII is not set
689# CONFIG_IBM_NEW_EMAC_RGMII is not set 693# CONFIG_IBM_NEW_EMAC_RGMII is not set
690# CONFIG_IBM_NEW_EMAC_TAH is not set 694# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -695,14 +699,10 @@ CONFIG_SMC91X=y
695# CONFIG_B44 is not set 699# CONFIG_B44 is not set
696# CONFIG_KS8842 is not set 700# CONFIG_KS8842 is not set
697# CONFIG_KS8851 is not set 701# CONFIG_KS8851 is not set
702# CONFIG_KS8851_MLL is not set
698# CONFIG_NETDEV_1000 is not set 703# CONFIG_NETDEV_1000 is not set
699# CONFIG_NETDEV_10000 is not set 704# CONFIG_NETDEV_10000 is not set
700 705# CONFIG_WLAN is not set
701#
702# Wireless LAN
703#
704# CONFIG_WLAN_PRE80211 is not set
705# CONFIG_WLAN_80211 is not set
706 706
707# 707#
708# Enable WiMAX (Networking options) to see the WiMAX drivers 708# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -782,11 +782,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
782CONFIG_UNIX98_PTYS=y 782CONFIG_UNIX98_PTYS=y
783# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 783# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
784# CONFIG_LEGACY_PTYS is not set 784# CONFIG_LEGACY_PTYS is not set
785
786#
787# CAN, the car bus and industrial fieldbus
788#
789# CONFIG_CAN4LINUX is not set
790# CONFIG_IPMI_HANDLER is not set 785# CONFIG_IPMI_HANDLER is not set
791# CONFIG_HW_RANDOM is not set 786# CONFIG_HW_RANDOM is not set
792# CONFIG_R3964 is not set 787# CONFIG_R3964 is not set
@@ -838,11 +833,15 @@ CONFIG_GPIO_SYSFS=y
838# 833#
839# CONFIG_GPIO_MAX7301 is not set 834# CONFIG_GPIO_MAX7301 is not set
840# CONFIG_GPIO_MCP23S08 is not set 835# CONFIG_GPIO_MCP23S08 is not set
836# CONFIG_GPIO_MC33880 is not set
837
838#
839# AC97 GPIO expanders:
840#
841# CONFIG_W1 is not set 841# CONFIG_W1 is not set
842# CONFIG_POWER_SUPPLY is not set 842# CONFIG_POWER_SUPPLY is not set
843# CONFIG_HWMON is not set 843# CONFIG_HWMON is not set
844# CONFIG_THERMAL is not set 844# CONFIG_THERMAL is not set
845# CONFIG_THERMAL_HWMON is not set
846CONFIG_WATCHDOG=y 845CONFIG_WATCHDOG=y
847# CONFIG_WATCHDOG_NOWAYOUT is not set 846# CONFIG_WATCHDOG_NOWAYOUT is not set
848 847
@@ -865,6 +864,7 @@ CONFIG_SSB_POSSIBLE=y
865# CONFIG_MFD_SM501 is not set 864# CONFIG_MFD_SM501 is not set
866# CONFIG_HTC_PASIC3 is not set 865# CONFIG_HTC_PASIC3 is not set
867# CONFIG_MFD_TMIO is not set 866# CONFIG_MFD_TMIO is not set
867# CONFIG_MFD_MC13783 is not set
868# CONFIG_EZX_PCAP is not set 868# CONFIG_EZX_PCAP is not set
869# CONFIG_REGULATOR is not set 869# CONFIG_REGULATOR is not set
870# CONFIG_MEDIA_SUPPORT is not set 870# CONFIG_MEDIA_SUPPORT is not set
@@ -884,7 +884,6 @@ CONFIG_SSB_POSSIBLE=y
884# CONFIG_SOUND is not set 884# CONFIG_SOUND is not set
885CONFIG_HID_SUPPORT=y 885CONFIG_HID_SUPPORT=y
886CONFIG_HID=m 886CONFIG_HID=m
887# CONFIG_HID_DEBUG is not set
888# CONFIG_HIDRAW is not set 887# CONFIG_HIDRAW is not set
889# CONFIG_HID_PID is not set 888# CONFIG_HID_PID is not set
890 889
@@ -923,6 +922,7 @@ CONFIG_HID=m
923# CONFIG_XFS_FS is not set 922# CONFIG_XFS_FS is not set
924# CONFIG_OCFS2_FS is not set 923# CONFIG_OCFS2_FS is not set
925# CONFIG_BTRFS_FS is not set 924# CONFIG_BTRFS_FS is not set
925# CONFIG_NILFS2_FS is not set
926CONFIG_FILE_LOCKING=y 926CONFIG_FILE_LOCKING=y
927CONFIG_FSNOTIFY=y 927CONFIG_FSNOTIFY=y
928# CONFIG_DNOTIFY is not set 928# CONFIG_DNOTIFY is not set
@@ -957,7 +957,6 @@ CONFIG_INOTIFY_USER=y
957CONFIG_PROC_FS=y 957CONFIG_PROC_FS=y
958CONFIG_PROC_SYSCTL=y 958CONFIG_PROC_SYSCTL=y
959CONFIG_SYSFS=y 959CONFIG_SYSFS=y
960# CONFIG_TMPFS is not set
961# CONFIG_HUGETLB_PAGE is not set 960# CONFIG_HUGETLB_PAGE is not set
962# CONFIG_CONFIGFS_FS is not set 961# CONFIG_CONFIGFS_FS is not set
963CONFIG_MISC_FILESYSTEMS=y 962CONFIG_MISC_FILESYSTEMS=y
@@ -989,7 +988,6 @@ CONFIG_JFFS2_RTIME=y
989# CONFIG_ROMFS_FS is not set 988# CONFIG_ROMFS_FS is not set
990# CONFIG_SYSV_FS is not set 989# CONFIG_SYSV_FS is not set
991# CONFIG_UFS_FS is not set 990# CONFIG_UFS_FS is not set
992# CONFIG_NILFS2_FS is not set
993CONFIG_NETWORK_FILESYSTEMS=y 991CONFIG_NETWORK_FILESYSTEMS=y
994CONFIG_NFS_FS=m 992CONFIG_NFS_FS=m
995CONFIG_NFS_V3=y 993CONFIG_NFS_V3=y
@@ -1064,6 +1062,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1064CONFIG_ENABLE_MUST_CHECK=y 1062CONFIG_ENABLE_MUST_CHECK=y
1065CONFIG_FRAME_WARN=1024 1063CONFIG_FRAME_WARN=1024
1066# CONFIG_MAGIC_SYSRQ is not set 1064# CONFIG_MAGIC_SYSRQ is not set
1065# CONFIG_STRIP_ASM_SYMS is not set
1067# CONFIG_UNUSED_SYMBOLS is not set 1066# CONFIG_UNUSED_SYMBOLS is not set
1068CONFIG_DEBUG_FS=y 1067CONFIG_DEBUG_FS=y
1069# CONFIG_HEADERS_CHECK is not set 1068# CONFIG_HEADERS_CHECK is not set
@@ -1098,26 +1097,24 @@ CONFIG_DEBUG_INFO=y
1098# CONFIG_DEBUG_LIST is not set 1097# CONFIG_DEBUG_LIST is not set
1099# CONFIG_DEBUG_SG is not set 1098# CONFIG_DEBUG_SG is not set
1100# CONFIG_DEBUG_NOTIFIERS is not set 1099# CONFIG_DEBUG_NOTIFIERS is not set
1100# CONFIG_DEBUG_CREDENTIALS is not set
1101# CONFIG_FRAME_POINTER is not set 1101# CONFIG_FRAME_POINTER is not set
1102# CONFIG_BOOT_PRINTK_DELAY is not set 1102# CONFIG_BOOT_PRINTK_DELAY is not set
1103# CONFIG_RCU_TORTURE_TEST is not set 1103# CONFIG_RCU_TORTURE_TEST is not set
1104# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1104# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1105# CONFIG_BACKTRACE_SELF_TEST is not set 1105# CONFIG_BACKTRACE_SELF_TEST is not set
1106# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 1106# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1107# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1107# CONFIG_FAULT_INJECTION is not set 1108# CONFIG_FAULT_INJECTION is not set
1108# CONFIG_PAGE_POISONING is not set 1109# CONFIG_PAGE_POISONING is not set
1109CONFIG_HAVE_FUNCTION_TRACER=y 1110CONFIG_HAVE_FUNCTION_TRACER=y
1110CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y 1111CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1111CONFIG_TRACING_SUPPORT=y 1112CONFIG_TRACING_SUPPORT=y
1112# CONFIG_FTRACE is not set 1113# CONFIG_FTRACE is not set
1113# CONFIG_BRANCH_PROFILE_NONE is not set
1114# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1115# CONFIG_PROFILE_ALL_BRANCHES is not set
1116# CONFIG_DYNAMIC_DEBUG is not set 1114# CONFIG_DYNAMIC_DEBUG is not set
1117# CONFIG_SAMPLES is not set 1115# CONFIG_SAMPLES is not set
1118CONFIG_HAVE_ARCH_KGDB=y 1116CONFIG_HAVE_ARCH_KGDB=y
1119# CONFIG_KGDB is not set 1117# CONFIG_KGDB is not set
1120# CONFIG_KMEMCHECK is not set
1121# CONFIG_DEBUG_STACKOVERFLOW is not set 1118# CONFIG_DEBUG_STACKOVERFLOW is not set
1122# CONFIG_DEBUG_STACK_USAGE is not set 1119# CONFIG_DEBUG_STACK_USAGE is not set
1123CONFIG_DEBUG_VERBOSE=y 1120CONFIG_DEBUG_VERBOSE=y
@@ -1153,7 +1150,6 @@ CONFIG_CRYPTO=y
1153# 1150#
1154# Crypto core or helper 1151# Crypto core or helper
1155# 1152#
1156# CONFIG_CRYPTO_FIPS is not set
1157# CONFIG_CRYPTO_MANAGER is not set 1153# CONFIG_CRYPTO_MANAGER is not set
1158# CONFIG_CRYPTO_MANAGER2 is not set 1154# CONFIG_CRYPTO_MANAGER2 is not set
1159# CONFIG_CRYPTO_GF128MUL is not set 1155# CONFIG_CRYPTO_GF128MUL is not set
@@ -1185,11 +1181,13 @@ CONFIG_CRYPTO=y
1185# 1181#
1186# CONFIG_CRYPTO_HMAC is not set 1182# CONFIG_CRYPTO_HMAC is not set
1187# CONFIG_CRYPTO_XCBC is not set 1183# CONFIG_CRYPTO_XCBC is not set
1184# CONFIG_CRYPTO_VMAC is not set
1188 1185
1189# 1186#
1190# Digest 1187# Digest
1191# 1188#
1192# CONFIG_CRYPTO_CRC32C is not set 1189# CONFIG_CRYPTO_CRC32C is not set
1190# CONFIG_CRYPTO_GHASH is not set
1193# CONFIG_CRYPTO_MD4 is not set 1191# CONFIG_CRYPTO_MD4 is not set
1194# CONFIG_CRYPTO_MD5 is not set 1192# CONFIG_CRYPTO_MD5 is not set
1195# CONFIG_CRYPTO_MICHAEL_MIC is not set 1193# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index bc7fae3d8b83..ed0a7ebeb85c 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -834,13 +834,6 @@ CONFIG_SND_VERBOSE_PROCFS=y
834# 834#
835# ALSA Blackfin devices 835# ALSA Blackfin devices
836# 836#
837CONFIG_SND_BLACKFIN_AD1836=m
838CONFIG_SND_BLACKFIN_AD1836_TDM=y
839# CONFIG_SND_BLACKFIN_AD1836_I2S is not set
840CONFIG_SND_BLACKFIN_AD1836_MULSUB=y
841# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set
842CONFIG_SND_BLACKFIN_SPORT=0
843CONFIG_SND_BLACKFIN_SPI_PFBIT=4
844# CONFIG_SND_BFIN_AD73311 is not set 837# CONFIG_SND_BFIN_AD73311 is not set
845 838
846# 839#
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 67d12768602a..ad58fede1f41 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -1,21 +1,27 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.28.10 3# Linux kernel version: 2.6.32.2
4# 4#
5# CONFIG_MMU is not set 5# CONFIG_MMU is not set
6# CONFIG_FPU is not set 6# CONFIG_FPU is not set
7CONFIG_RWSEM_GENERIC_SPINLOCK=y 7CONFIG_RWSEM_GENERIC_SPINLOCK=y
8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set 8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
9CONFIG_BLACKFIN=y 9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
10CONFIG_ZONE_DMA=y 12CONFIG_ZONE_DMA=y
11CONFIG_GENERIC_FIND_NEXT_BIT=y 13CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y
13CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
14CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
15CONFIG_GENERIC_GPIO=y 17CONFIG_GENERIC_GPIO=y
16CONFIG_FORCE_MAX_ZONEORDER=14 18CONFIG_FORCE_MAX_ZONEORDER=14
17CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
18CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
19 25
20# 26#
21# General setup 27# General setup
@@ -25,16 +31,32 @@ CONFIG_BROKEN_ON_SMP=y
25CONFIG_INIT_ENV_ARG_LIMIT=32 31CONFIG_INIT_ENV_ARG_LIMIT=32
26CONFIG_LOCALVERSION="" 32CONFIG_LOCALVERSION=""
27CONFIG_LOCALVERSION_AUTO=y 33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set
39# CONFIG_KERNEL_LZMA is not set
28CONFIG_SYSVIPC=y 40CONFIG_SYSVIPC=y
29CONFIG_SYSVIPC_SYSCTL=y 41CONFIG_SYSVIPC_SYSCTL=y
30# CONFIG_POSIX_MQUEUE is not set 42# CONFIG_POSIX_MQUEUE is not set
31# CONFIG_BSD_PROCESS_ACCT is not set 43# CONFIG_BSD_PROCESS_ACCT is not set
32# CONFIG_TASKSTATS is not set 44# CONFIG_TASKSTATS is not set
33# CONFIG_AUDIT is not set 45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
34# CONFIG_IKCONFIG is not set 56# CONFIG_IKCONFIG is not set
35CONFIG_LOG_BUF_SHIFT=14 57CONFIG_LOG_BUF_SHIFT=14
36# CONFIG_CGROUPS is not set
37# CONFIG_GROUP_SCHED is not set 58# CONFIG_GROUP_SCHED is not set
59# CONFIG_CGROUPS is not set
38# CONFIG_SYSFS_DEPRECATED_V2 is not set 60# CONFIG_SYSFS_DEPRECATED_V2 is not set
39# CONFIG_RELAY is not set 61# CONFIG_RELAY is not set
40# CONFIG_NAMESPACES is not set 62# CONFIG_NAMESPACES is not set
@@ -58,6 +80,10 @@ CONFIG_SIGNALFD=y
58CONFIG_TIMERFD=y 80CONFIG_TIMERFD=y
59CONFIG_EVENTFD=y 81CONFIG_EVENTFD=y
60# CONFIG_AIO is not set 82# CONFIG_AIO is not set
83
84#
85# Kernel Performance Events And Counters
86#
61CONFIG_VM_EVENT_COUNTERS=y 87CONFIG_VM_EVENT_COUNTERS=y
62CONFIG_COMPAT_BRK=y 88CONFIG_COMPAT_BRK=y
63CONFIG_SLAB=y 89CONFIG_SLAB=y
@@ -65,11 +91,14 @@ CONFIG_SLAB=y
65# CONFIG_SLOB is not set 91# CONFIG_SLOB is not set
66CONFIG_MMAP_ALLOW_UNINITIALIZED=y 92CONFIG_MMAP_ALLOW_UNINITIALIZED=y
67# CONFIG_PROFILING is not set 93# CONFIG_PROFILING is not set
68# CONFIG_MARKERS is not set
69CONFIG_HAVE_OPROFILE=y 94CONFIG_HAVE_OPROFILE=y
95
96#
97# GCOV-based kernel profiling
98#
99# CONFIG_SLOW_WORK is not set
70# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 100# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
71CONFIG_SLABINFO=y 101CONFIG_SLABINFO=y
72CONFIG_TINY_SHMEM=y
73CONFIG_BASE_SMALL=0 102CONFIG_BASE_SMALL=0
74CONFIG_MODULES=y 103CONFIG_MODULES=y
75# CONFIG_MODULE_FORCE_LOAD is not set 104# CONFIG_MODULE_FORCE_LOAD is not set
@@ -77,11 +106,8 @@ CONFIG_MODULE_UNLOAD=y
77# CONFIG_MODULE_FORCE_UNLOAD is not set 106# CONFIG_MODULE_FORCE_UNLOAD is not set
78# CONFIG_MODVERSIONS is not set 107# CONFIG_MODVERSIONS is not set
79# CONFIG_MODULE_SRCVERSION_ALL is not set 108# CONFIG_MODULE_SRCVERSION_ALL is not set
80CONFIG_KMOD=y
81CONFIG_BLOCK=y 109CONFIG_BLOCK=y
82# CONFIG_LBD is not set 110# CONFIG_LBDAF is not set
83# CONFIG_BLK_DEV_IO_TRACE is not set
84# CONFIG_LSF is not set
85# CONFIG_BLK_DEV_BSG is not set 111# CONFIG_BLK_DEV_BSG is not set
86# CONFIG_BLK_DEV_INTEGRITY is not set 112# CONFIG_BLK_DEV_INTEGRITY is not set
87 113
@@ -97,7 +123,6 @@ CONFIG_DEFAULT_AS=y
97# CONFIG_DEFAULT_CFQ is not set 123# CONFIG_DEFAULT_CFQ is not set
98# CONFIG_DEFAULT_NOOP is not set 124# CONFIG_DEFAULT_NOOP is not set
99CONFIG_DEFAULT_IOSCHED="anticipatory" 125CONFIG_DEFAULT_IOSCHED="anticipatory"
100CONFIG_CLASSIC_RCU=y
101# CONFIG_PREEMPT_NONE is not set 126# CONFIG_PREEMPT_NONE is not set
102CONFIG_PREEMPT_VOLUNTARY=y 127CONFIG_PREEMPT_VOLUNTARY=y
103# CONFIG_PREEMPT is not set 128# CONFIG_PREEMPT is not set
@@ -128,15 +153,15 @@ CONFIG_PREEMPT_VOLUNTARY=y
128CONFIG_BF537=y 153CONFIG_BF537=y
129# CONFIG_BF538 is not set 154# CONFIG_BF538 is not set
130# CONFIG_BF539 is not set 155# CONFIG_BF539 is not set
131# CONFIG_BF542 is not set 156# CONFIG_BF542_std is not set
132# CONFIG_BF542M is not set 157# CONFIG_BF542M is not set
133# CONFIG_BF544 is not set 158# CONFIG_BF544_std is not set
134# CONFIG_BF544M is not set 159# CONFIG_BF544M is not set
135# CONFIG_BF547 is not set 160# CONFIG_BF547_std is not set
136# CONFIG_BF547M is not set 161# CONFIG_BF547M is not set
137# CONFIG_BF548 is not set 162# CONFIG_BF548_std is not set
138# CONFIG_BF548M is not set 163# CONFIG_BF548M is not set
139# CONFIG_BF549 is not set 164# CONFIG_BF549_std is not set
140# CONFIG_BF549M is not set 165# CONFIG_BF549M is not set
141# CONFIG_BF561 is not set 166# CONFIG_BF561 is not set
142CONFIG_BF_REV_MIN=2 167CONFIG_BF_REV_MIN=2
@@ -180,7 +205,8 @@ CONFIG_IRQ_MEM_DMA1=13
180CONFIG_IRQ_WATCH=13 205CONFIG_IRQ_WATCH=13
181CONFIG_IRQ_SPI=10 206CONFIG_IRQ_SPI=10
182# CONFIG_BFIN537_STAMP is not set 207# CONFIG_BFIN537_STAMP is not set
183# CONFIG_BFIN537_BLUETECHNIX_CM is not set 208# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set
209# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set
184# CONFIG_BFIN537_BLUETECHNIX_TCM is not set 210# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
185CONFIG_PNAV10=y 211CONFIG_PNAV10=y
186# CONFIG_CAMSIG_MINOTAUR is not set 212# CONFIG_CAMSIG_MINOTAUR is not set
@@ -282,7 +308,6 @@ CONFIG_FLATMEM=y
282CONFIG_FLAT_NODE_MEM_MAP=y 308CONFIG_FLAT_NODE_MEM_MAP=y
283CONFIG_PAGEFLAGS_EXTENDED=y 309CONFIG_PAGEFLAGS_EXTENDED=y
284CONFIG_SPLIT_PTLOCK_CPUS=4 310CONFIG_SPLIT_PTLOCK_CPUS=4
285# CONFIG_RESOURCES_64BIT is not set
286# CONFIG_PHYS_ADDR_T_64BIT is not set 311# CONFIG_PHYS_ADDR_T_64BIT is not set
287CONFIG_ZONE_DMA_FLAG=1 312CONFIG_ZONE_DMA_FLAG=1
288CONFIG_VIRT_TO_BUS=y 313CONFIG_VIRT_TO_BUS=y
@@ -291,16 +316,18 @@ CONFIG_BFIN_GPTIMERS=y
291# CONFIG_DMA_UNCACHED_4M is not set 316# CONFIG_DMA_UNCACHED_4M is not set
292# CONFIG_DMA_UNCACHED_2M is not set 317# CONFIG_DMA_UNCACHED_2M is not set
293CONFIG_DMA_UNCACHED_1M=y 318CONFIG_DMA_UNCACHED_1M=y
319# CONFIG_DMA_UNCACHED_512K is not set
320# CONFIG_DMA_UNCACHED_256K is not set
321# CONFIG_DMA_UNCACHED_128K is not set
294# CONFIG_DMA_UNCACHED_NONE is not set 322# CONFIG_DMA_UNCACHED_NONE is not set
295 323
296# 324#
297# Cache Support 325# Cache Support
298# 326#
299CONFIG_BFIN_ICACHE=y 327CONFIG_BFIN_ICACHE=y
300# CONFIG_BFIN_ICACHE_LOCK is not set 328CONFIG_BFIN_EXTMEM_ICACHEABLE=y
301CONFIG_BFIN_DCACHE=y 329CONFIG_BFIN_DCACHE=y
302# CONFIG_BFIN_DCACHE_BANKA is not set 330# CONFIG_BFIN_DCACHE_BANKA is not set
303CONFIG_BFIN_EXTMEM_ICACHEABLE=y
304CONFIG_BFIN_EXTMEM_DCACHEABLE=y 331CONFIG_BFIN_EXTMEM_DCACHEABLE=y
305CONFIG_BFIN_EXTMEM_WRITEBACK=y 332CONFIG_BFIN_EXTMEM_WRITEBACK=y
306# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set 333# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -311,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
311# CONFIG_MPU is not set 338# CONFIG_MPU is not set
312 339
313# 340#
314# Asynchonous Memory Configuration 341# Asynchronous Memory Configuration
315# 342#
316 343
317# 344#
@@ -367,11 +394,6 @@ CONFIG_NET=y
367CONFIG_PACKET=y 394CONFIG_PACKET=y
368# CONFIG_PACKET_MMAP is not set 395# CONFIG_PACKET_MMAP is not set
369CONFIG_UNIX=y 396CONFIG_UNIX=y
370CONFIG_XFRM=y
371# CONFIG_XFRM_USER is not set
372# CONFIG_XFRM_SUB_POLICY is not set
373# CONFIG_XFRM_MIGRATE is not set
374# CONFIG_XFRM_STATISTICS is not set
375# CONFIG_NET_KEY is not set 397# CONFIG_NET_KEY is not set
376CONFIG_INET=y 398CONFIG_INET=y
377# CONFIG_IP_MULTICAST is not set 399# CONFIG_IP_MULTICAST is not set
@@ -395,7 +417,6 @@ CONFIG_IP_PNP=y
395# CONFIG_INET_XFRM_MODE_BEET is not set 417# CONFIG_INET_XFRM_MODE_BEET is not set
396# CONFIG_INET_LRO is not set 418# CONFIG_INET_LRO is not set
397# CONFIG_INET_DIAG is not set 419# CONFIG_INET_DIAG is not set
398CONFIG_INET_TCP_DIAG=y
399# CONFIG_TCP_CONG_ADVANCED is not set 420# CONFIG_TCP_CONG_ADVANCED is not set
400CONFIG_TCP_CONG_CUBIC=y 421CONFIG_TCP_CONG_CUBIC=y
401CONFIG_DEFAULT_TCP_CONG="cubic" 422CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -406,6 +427,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
406# CONFIG_NETFILTER is not set 427# CONFIG_NETFILTER is not set
407# CONFIG_IP_DCCP is not set 428# CONFIG_IP_DCCP is not set
408# CONFIG_IP_SCTP is not set 429# CONFIG_IP_SCTP is not set
430# CONFIG_RDS is not set
409# CONFIG_TIPC is not set 431# CONFIG_TIPC is not set
410# CONFIG_ATM is not set 432# CONFIG_ATM is not set
411# CONFIG_BRIDGE is not set 433# CONFIG_BRIDGE is not set
@@ -419,7 +441,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
419# CONFIG_LAPB is not set 441# CONFIG_LAPB is not set
420# CONFIG_ECONET is not set 442# CONFIG_ECONET is not set
421# CONFIG_WAN_ROUTER is not set 443# CONFIG_WAN_ROUTER is not set
444# CONFIG_PHONET is not set
445# CONFIG_IEEE802154 is not set
422# CONFIG_NET_SCHED is not set 446# CONFIG_NET_SCHED is not set
447# CONFIG_DCB is not set
423 448
424# 449#
425# Network testing 450# Network testing
@@ -430,13 +455,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
430# CONFIG_IRDA is not set 455# CONFIG_IRDA is not set
431# CONFIG_BT is not set 456# CONFIG_BT is not set
432# CONFIG_AF_RXRPC is not set 457# CONFIG_AF_RXRPC is not set
433# CONFIG_PHONET is not set 458# CONFIG_WIRELESS is not set
434CONFIG_WIRELESS=y 459# CONFIG_WIMAX is not set
435# CONFIG_CFG80211 is not set
436CONFIG_WIRELESS_OLD_REGULATORY=y
437# CONFIG_WIRELESS_EXT is not set
438# CONFIG_MAC80211 is not set
439# CONFIG_IEEE80211 is not set
440# CONFIG_RFKILL is not set 460# CONFIG_RFKILL is not set
441# CONFIG_NET_9P is not set 461# CONFIG_NET_9P is not set
442 462
@@ -455,6 +475,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
455# CONFIG_CONNECTOR is not set 475# CONFIG_CONNECTOR is not set
456CONFIG_MTD=y 476CONFIG_MTD=y
457# CONFIG_MTD_DEBUG is not set 477# CONFIG_MTD_DEBUG is not set
478# CONFIG_MTD_TESTS is not set
458# CONFIG_MTD_CONCAT is not set 479# CONFIG_MTD_CONCAT is not set
459CONFIG_MTD_PARTITIONS=y 480CONFIG_MTD_PARTITIONS=y
460# CONFIG_MTD_REDBOOT_PARTS is not set 481# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -506,6 +527,7 @@ CONFIG_MTD_UCLINUX=y
506# 527#
507# CONFIG_MTD_DATAFLASH is not set 528# CONFIG_MTD_DATAFLASH is not set
508# CONFIG_MTD_M25P80 is not set 529# CONFIG_MTD_M25P80 is not set
530# CONFIG_MTD_SST25L is not set
509# CONFIG_MTD_SLRAM is not set 531# CONFIG_MTD_SLRAM is not set
510# CONFIG_MTD_PHRAM is not set 532# CONFIG_MTD_PHRAM is not set
511# CONFIG_MTD_MTDRAM is not set 533# CONFIG_MTD_MTDRAM is not set
@@ -521,11 +543,6 @@ CONFIG_MTD_NAND=y
521# CONFIG_MTD_NAND_VERIFY_WRITE is not set 543# CONFIG_MTD_NAND_VERIFY_WRITE is not set
522# CONFIG_MTD_NAND_ECC_SMC is not set 544# CONFIG_MTD_NAND_ECC_SMC is not set
523# CONFIG_MTD_NAND_MUSEUM_IDS is not set 545# CONFIG_MTD_NAND_MUSEUM_IDS is not set
524CONFIG_MTD_NAND_BFIN=y
525CONFIG_BFIN_NAND_BASE=0x20100000
526CONFIG_BFIN_NAND_CLE=2
527CONFIG_BFIN_NAND_ALE=1
528CONFIG_BFIN_NAND_READY=44
529CONFIG_MTD_NAND_IDS=y 546CONFIG_MTD_NAND_IDS=y
530# CONFIG_MTD_NAND_DISKONCHIP is not set 547# CONFIG_MTD_NAND_DISKONCHIP is not set
531# CONFIG_MTD_NAND_NANDSIM is not set 548# CONFIG_MTD_NAND_NANDSIM is not set
@@ -533,6 +550,11 @@ CONFIG_MTD_NAND_IDS=y
533# CONFIG_MTD_ONENAND is not set 550# CONFIG_MTD_ONENAND is not set
534 551
535# 552#
553# LPDDR flash memory drivers
554#
555# CONFIG_MTD_LPDDR is not set
556
557#
536# UBI - Unsorted block images 558# UBI - Unsorted block images
537# 559#
538# CONFIG_MTD_UBI is not set 560# CONFIG_MTD_UBI is not set
@@ -549,10 +571,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
549# CONFIG_ATA_OVER_ETH is not set 571# CONFIG_ATA_OVER_ETH is not set
550# CONFIG_BLK_DEV_HD is not set 572# CONFIG_BLK_DEV_HD is not set
551CONFIG_MISC_DEVICES=y 573CONFIG_MISC_DEVICES=y
552# CONFIG_EEPROM_93CX6 is not set 574# CONFIG_AD525X_DPOT is not set
553# CONFIG_ICS932S401 is not set 575# CONFIG_ICS932S401 is not set
554# CONFIG_ENCLOSURE_SERVICES is not set 576# CONFIG_ENCLOSURE_SERVICES is not set
577# CONFIG_ISL29003 is not set
555# CONFIG_C2PORT is not set 578# CONFIG_C2PORT is not set
579
580#
581# EEPROM support
582#
583# CONFIG_EEPROM_AT24 is not set
584# CONFIG_EEPROM_AT25 is not set
585# CONFIG_EEPROM_LEGACY is not set
586# CONFIG_EEPROM_MAX6875 is not set
587# CONFIG_EEPROM_93CX6 is not set
556CONFIG_HAVE_IDE=y 588CONFIG_HAVE_IDE=y
557# CONFIG_IDE is not set 589# CONFIG_IDE is not set
558 590
@@ -587,6 +619,9 @@ CONFIG_PHYLIB=y
587# CONFIG_BROADCOM_PHY is not set 619# CONFIG_BROADCOM_PHY is not set
588# CONFIG_ICPLUS_PHY is not set 620# CONFIG_ICPLUS_PHY is not set
589# CONFIG_REALTEK_PHY is not set 621# CONFIG_REALTEK_PHY is not set
622# CONFIG_NATIONAL_PHY is not set
623# CONFIG_STE10XP is not set
624# CONFIG_LSI_ET1011C_PHY is not set
590# CONFIG_FIXED_PHY is not set 625# CONFIG_FIXED_PHY is not set
591# CONFIG_MDIO_BITBANG is not set 626# CONFIG_MDIO_BITBANG is not set
592CONFIG_NET_ETHERNET=y 627CONFIG_NET_ETHERNET=y
@@ -597,9 +632,12 @@ CONFIG_BFIN_TX_DESC_NUM=100
597CONFIG_BFIN_RX_DESC_NUM=100 632CONFIG_BFIN_RX_DESC_NUM=100
598CONFIG_BFIN_MAC_RMII=y 633CONFIG_BFIN_MAC_RMII=y
599# CONFIG_SMC91X is not set 634# CONFIG_SMC91X is not set
600# CONFIG_SMSC911X is not set
601# CONFIG_DM9000 is not set 635# CONFIG_DM9000 is not set
602# CONFIG_ENC28J60 is not set 636# CONFIG_ENC28J60 is not set
637# CONFIG_ETHOC is not set
638# CONFIG_SMSC911X is not set
639# CONFIG_DNET is not set
640# CONFIG_ADF702X is not set
603# CONFIG_IBM_NEW_EMAC_ZMII is not set 641# CONFIG_IBM_NEW_EMAC_ZMII is not set
604# CONFIG_IBM_NEW_EMAC_RGMII is not set 642# CONFIG_IBM_NEW_EMAC_RGMII is not set
605# CONFIG_IBM_NEW_EMAC_TAH is not set 643# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -608,15 +646,16 @@ CONFIG_BFIN_MAC_RMII=y
608# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 646# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
609# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 647# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
610# CONFIG_B44 is not set 648# CONFIG_B44 is not set
649# CONFIG_KS8842 is not set
650# CONFIG_KS8851 is not set
651# CONFIG_KS8851_MLL is not set
611# CONFIG_NETDEV_1000 is not set 652# CONFIG_NETDEV_1000 is not set
612# CONFIG_NETDEV_10000 is not set 653# CONFIG_NETDEV_10000 is not set
654# CONFIG_WLAN is not set
613 655
614# 656#
615# Wireless LAN 657# Enable WiMAX (Networking options) to see the WiMAX drivers
616# 658#
617# CONFIG_WLAN_PRE80211 is not set
618# CONFIG_WLAN_80211 is not set
619# CONFIG_IWLWIFI_LEDS is not set
620# CONFIG_WAN is not set 659# CONFIG_WAN is not set
621# CONFIG_PPP is not set 660# CONFIG_PPP is not set
622# CONFIG_SLIP is not set 661# CONFIG_SLIP is not set
@@ -649,14 +688,17 @@ CONFIG_INPUT_EVDEV=y
649# CONFIG_INPUT_JOYSTICK is not set 688# CONFIG_INPUT_JOYSTICK is not set
650# CONFIG_INPUT_TABLET is not set 689# CONFIG_INPUT_TABLET is not set
651CONFIG_INPUT_TOUCHSCREEN=y 690CONFIG_INPUT_TOUCHSCREEN=y
691# CONFIG_TOUCHSCREEN_ADS7846 is not set
652CONFIG_TOUCHSCREEN_AD7877=y 692CONFIG_TOUCHSCREEN_AD7877=y
653# CONFIG_TOUCHSCREEN_AD7879_I2C is not set 693# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
654# CONFIG_TOUCHSCREEN_AD7879_SPI is not set 694# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
655# CONFIG_TOUCHSCREEN_AD7879 is not set 695# CONFIG_TOUCHSCREEN_AD7879 is not set
656# CONFIG_TOUCHSCREEN_ADS7846 is not set 696# CONFIG_TOUCHSCREEN_EETI is not set
657# CONFIG_TOUCHSCREEN_FUJITSU is not set 697# CONFIG_TOUCHSCREEN_FUJITSU is not set
658# CONFIG_TOUCHSCREEN_GUNZE is not set 698# CONFIG_TOUCHSCREEN_GUNZE is not set
659# CONFIG_TOUCHSCREEN_ELO is not set 699# CONFIG_TOUCHSCREEN_ELO is not set
700# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
701# CONFIG_TOUCHSCREEN_MCS5000 is not set
660# CONFIG_TOUCHSCREEN_MTOUCH is not set 702# CONFIG_TOUCHSCREEN_MTOUCH is not set
661# CONFIG_TOUCHSCREEN_INEXIO is not set 703# CONFIG_TOUCHSCREEN_INEXIO is not set
662# CONFIG_TOUCHSCREEN_MK712 is not set 704# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -665,6 +707,7 @@ CONFIG_TOUCHSCREEN_AD7877=y
665# CONFIG_TOUCHSCREEN_TOUCHWIN is not set 707# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
666# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set 708# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
667# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set 709# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
710# CONFIG_TOUCHSCREEN_TSC2007 is not set
668CONFIG_INPUT_MISC=y 711CONFIG_INPUT_MISC=y
669# CONFIG_INPUT_ATI_REMOTE is not set 712# CONFIG_INPUT_ATI_REMOTE is not set
670# CONFIG_INPUT_ATI_REMOTE2 is not set 713# CONFIG_INPUT_ATI_REMOTE2 is not set
@@ -673,7 +716,9 @@ CONFIG_INPUT_MISC=y
673# CONFIG_INPUT_YEALINK is not set 716# CONFIG_INPUT_YEALINK is not set
674# CONFIG_INPUT_CM109 is not set 717# CONFIG_INPUT_CM109 is not set
675CONFIG_INPUT_UINPUT=y 718CONFIG_INPUT_UINPUT=y
676# CONFIG_CONFIG_INPUT_PCF8574 is not set 719# CONFIG_INPUT_AD714X is not set
720# CONFIG_INPUT_ADXL34X is not set
721# CONFIG_INPUT_PCF8574 is not set
677 722
678# 723#
679# Hardware I/O ports 724# Hardware I/O ports
@@ -684,16 +729,13 @@ CONFIG_INPUT_UINPUT=y
684# 729#
685# Character devices 730# Character devices
686# 731#
687# CONFIG_AD9960 is not set
688CONFIG_BFIN_DMA_INTERFACE=m 732CONFIG_BFIN_DMA_INTERFACE=m
689# CONFIG_BFIN_PPI is not set 733# CONFIG_BFIN_PPI is not set
690# CONFIG_BFIN_PPIFCD is not set 734# CONFIG_BFIN_PPIFCD is not set
691# CONFIG_BFIN_SIMPLE_TIMER is not set 735# CONFIG_BFIN_SIMPLE_TIMER is not set
692# CONFIG_BFIN_SPI_ADC is not set 736# CONFIG_BFIN_SPI_ADC is not set
693CONFIG_BFIN_SPORT=y 737CONFIG_BFIN_SPORT=y
694# CONFIG_BFIN_TIMER_LATENCY is not set
695# CONFIG_BFIN_TWI_LCD is not set 738# CONFIG_BFIN_TWI_LCD is not set
696# CONFIG_SIMPLE_GPIO is not set
697# CONFIG_VT is not set 739# CONFIG_VT is not set
698CONFIG_DEVKMEM=y 740CONFIG_DEVKMEM=y
699# CONFIG_BFIN_JTAG_COMM is not set 741# CONFIG_BFIN_JTAG_COMM is not set
@@ -707,6 +749,7 @@ CONFIG_DEVKMEM=y
707# 749#
708# Non-8250 serial port support 750# Non-8250 serial port support
709# 751#
752# CONFIG_SERIAL_MAX3100 is not set
710CONFIG_SERIAL_BFIN=y 753CONFIG_SERIAL_BFIN=y
711CONFIG_SERIAL_BFIN_CONSOLE=y 754CONFIG_SERIAL_BFIN_CONSOLE=y
712CONFIG_SERIAL_BFIN_DMA=y 755CONFIG_SERIAL_BFIN_DMA=y
@@ -719,24 +762,17 @@ CONFIG_SERIAL_CORE=y
719CONFIG_SERIAL_CORE_CONSOLE=y 762CONFIG_SERIAL_CORE_CONSOLE=y
720# CONFIG_SERIAL_BFIN_SPORT is not set 763# CONFIG_SERIAL_BFIN_SPORT is not set
721CONFIG_UNIX98_PTYS=y 764CONFIG_UNIX98_PTYS=y
765# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
722# CONFIG_LEGACY_PTYS is not set 766# CONFIG_LEGACY_PTYS is not set
723
724#
725# CAN, the car bus and industrial fieldbus
726#
727CONFIG_CAN4LINUX=y
728
729#
730# linux embedded drivers
731#
732CONFIG_CAN_BLACKFIN=m
733# CONFIG_IPMI_HANDLER is not set 767# CONFIG_IPMI_HANDLER is not set
734CONFIG_HW_RANDOM=y 768CONFIG_HW_RANDOM=y
769# CONFIG_HW_RANDOM_TIMERIOMEM is not set
735# CONFIG_R3964 is not set 770# CONFIG_R3964 is not set
736# CONFIG_RAW_DRIVER is not set 771# CONFIG_RAW_DRIVER is not set
737# CONFIG_TCG_TPM is not set 772# CONFIG_TCG_TPM is not set
738CONFIG_I2C=y 773CONFIG_I2C=y
739CONFIG_I2C_BOARDINFO=y 774CONFIG_I2C_BOARDINFO=y
775CONFIG_I2C_COMPAT=y
740CONFIG_I2C_CHARDEV=y 776CONFIG_I2C_CHARDEV=y
741CONFIG_I2C_HELPER_AUTO=y 777CONFIG_I2C_HELPER_AUTO=y
742 778
@@ -769,14 +805,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
769# Miscellaneous I2C Chip support 805# Miscellaneous I2C Chip support
770# 806#
771# CONFIG_DS1682 is not set 807# CONFIG_DS1682 is not set
772# CONFIG_EEPROM_AT24 is not set
773# CONFIG_SENSORS_AD5252 is not set
774# CONFIG_EEPROM_LEGACY is not set
775CONFIG_SENSORS_PCF8574=m
776# CONFIG_PCF8575 is not set
777# CONFIG_SENSORS_PCA9539 is not set
778# CONFIG_SENSORS_PCF8591 is not set
779# CONFIG_SENSORS_MAX6875 is not set
780# CONFIG_SENSORS_TSL2550 is not set 808# CONFIG_SENSORS_TSL2550 is not set
781# CONFIG_I2C_DEBUG_CORE is not set 809# CONFIG_I2C_DEBUG_CORE is not set
782# CONFIG_I2C_DEBUG_ALGO is not set 810# CONFIG_I2C_DEBUG_ALGO is not set
@@ -792,20 +820,29 @@ CONFIG_SPI_BFIN=y
792# CONFIG_SPI_BFIN_LOCK is not set 820# CONFIG_SPI_BFIN_LOCK is not set
793# CONFIG_SPI_BFIN_SPORT is not set 821# CONFIG_SPI_BFIN_SPORT is not set
794# CONFIG_SPI_BITBANG is not set 822# CONFIG_SPI_BITBANG is not set
823# CONFIG_SPI_GPIO is not set
795 824
796# 825#
797# SPI Protocol Masters 826# SPI Protocol Masters
798# 827#
799# CONFIG_EEPROM_AT25 is not set
800# CONFIG_SPI_SPIDEV is not set 828# CONFIG_SPI_SPIDEV is not set
801# CONFIG_SPI_TLE62X0 is not set 829# CONFIG_SPI_TLE62X0 is not set
830
831#
832# PPS support
833#
834# CONFIG_PPS is not set
802CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y 835CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
803# CONFIG_GPIOLIB is not set 836# CONFIG_GPIOLIB is not set
804# CONFIG_W1 is not set 837# CONFIG_W1 is not set
805# CONFIG_POWER_SUPPLY is not set 838# CONFIG_POWER_SUPPLY is not set
806CONFIG_HWMON=y 839CONFIG_HWMON=y
807# CONFIG_HWMON_VID is not set 840# CONFIG_HWMON_VID is not set
808# CONFIG_SENSORS_AD5252 is not set 841# CONFIG_HWMON_DEBUG_CHIP is not set
842
843#
844# Native drivers
845#
809# CONFIG_SENSORS_AD7414 is not set 846# CONFIG_SENSORS_AD7414 is not set
810# CONFIG_SENSORS_AD7418 is not set 847# CONFIG_SENSORS_AD7418 is not set
811# CONFIG_SENSORS_ADCXX is not set 848# CONFIG_SENSORS_ADCXX is not set
@@ -818,11 +855,13 @@ CONFIG_HWMON=y
818# CONFIG_SENSORS_ADT7462 is not set 855# CONFIG_SENSORS_ADT7462 is not set
819# CONFIG_SENSORS_ADT7470 is not set 856# CONFIG_SENSORS_ADT7470 is not set
820# CONFIG_SENSORS_ADT7473 is not set 857# CONFIG_SENSORS_ADT7473 is not set
858# CONFIG_SENSORS_ADT7475 is not set
821# CONFIG_SENSORS_ATXP1 is not set 859# CONFIG_SENSORS_ATXP1 is not set
822# CONFIG_SENSORS_DS1621 is not set 860# CONFIG_SENSORS_DS1621 is not set
823# CONFIG_SENSORS_F71805F is not set 861# CONFIG_SENSORS_F71805F is not set
824# CONFIG_SENSORS_F71882FG is not set 862# CONFIG_SENSORS_F71882FG is not set
825# CONFIG_SENSORS_F75375S is not set 863# CONFIG_SENSORS_F75375S is not set
864# CONFIG_SENSORS_G760A is not set
826# CONFIG_SENSORS_GL518SM is not set 865# CONFIG_SENSORS_GL518SM is not set
827# CONFIG_SENSORS_GL520SM is not set 866# CONFIG_SENSORS_GL520SM is not set
828# CONFIG_SENSORS_IT87 is not set 867# CONFIG_SENSORS_IT87 is not set
@@ -838,17 +877,24 @@ CONFIG_HWMON=y
838# CONFIG_SENSORS_LM90 is not set 877# CONFIG_SENSORS_LM90 is not set
839# CONFIG_SENSORS_LM92 is not set 878# CONFIG_SENSORS_LM92 is not set
840# CONFIG_SENSORS_LM93 is not set 879# CONFIG_SENSORS_LM93 is not set
880# CONFIG_SENSORS_LTC4215 is not set
881# CONFIG_SENSORS_LTC4245 is not set
882# CONFIG_SENSORS_LM95241 is not set
841# CONFIG_SENSORS_MAX1111 is not set 883# CONFIG_SENSORS_MAX1111 is not set
842# CONFIG_SENSORS_MAX1619 is not set 884# CONFIG_SENSORS_MAX1619 is not set
843# CONFIG_SENSORS_MAX6650 is not set 885# CONFIG_SENSORS_MAX6650 is not set
844# CONFIG_SENSORS_PC87360 is not set 886# CONFIG_SENSORS_PC87360 is not set
845# CONFIG_SENSORS_PC87427 is not set 887# CONFIG_SENSORS_PC87427 is not set
888# CONFIG_SENSORS_PCF8591 is not set
889# CONFIG_SENSORS_SHT15 is not set
846# CONFIG_SENSORS_DME1737 is not set 890# CONFIG_SENSORS_DME1737 is not set
847# CONFIG_SENSORS_SMSC47M1 is not set 891# CONFIG_SENSORS_SMSC47M1 is not set
848# CONFIG_SENSORS_SMSC47M192 is not set 892# CONFIG_SENSORS_SMSC47M192 is not set
849# CONFIG_SENSORS_SMSC47B397 is not set 893# CONFIG_SENSORS_SMSC47B397 is not set
850# CONFIG_SENSORS_ADS7828 is not set 894# CONFIG_SENSORS_ADS7828 is not set
851# CONFIG_SENSORS_THMC50 is not set 895# CONFIG_SENSORS_THMC50 is not set
896# CONFIG_SENSORS_TMP401 is not set
897# CONFIG_SENSORS_TMP421 is not set
852# CONFIG_SENSORS_VT1211 is not set 898# CONFIG_SENSORS_VT1211 is not set
853# CONFIG_SENSORS_W83781D is not set 899# CONFIG_SENSORS_W83781D is not set
854# CONFIG_SENSORS_W83791D is not set 900# CONFIG_SENSORS_W83791D is not set
@@ -858,9 +904,8 @@ CONFIG_HWMON=y
858# CONFIG_SENSORS_W83L786NG is not set 904# CONFIG_SENSORS_W83L786NG is not set
859# CONFIG_SENSORS_W83627HF is not set 905# CONFIG_SENSORS_W83627HF is not set
860# CONFIG_SENSORS_W83627EHF is not set 906# CONFIG_SENSORS_W83627EHF is not set
861# CONFIG_HWMON_DEBUG_CHIP is not set 907# CONFIG_SENSORS_LIS3_SPI is not set
862# CONFIG_THERMAL is not set 908# CONFIG_THERMAL is not set
863# CONFIG_THERMAL_HWMON is not set
864# CONFIG_WATCHDOG is not set 909# CONFIG_WATCHDOG is not set
865CONFIG_SSB_POSSIBLE=y 910CONFIG_SSB_POSSIBLE=y
866 911
@@ -875,28 +920,19 @@ CONFIG_SSB_POSSIBLE=y
875# CONFIG_MFD_CORE is not set 920# CONFIG_MFD_CORE is not set
876# CONFIG_MFD_SM501 is not set 921# CONFIG_MFD_SM501 is not set
877# CONFIG_HTC_PASIC3 is not set 922# CONFIG_HTC_PASIC3 is not set
923# CONFIG_TWL4030_CORE is not set
878# CONFIG_MFD_TMIO is not set 924# CONFIG_MFD_TMIO is not set
879# CONFIG_PMIC_DA903X is not set 925# CONFIG_PMIC_DA903X is not set
880# CONFIG_PMIC_ADP5520 is not set 926# CONFIG_PMIC_ADP5520 is not set
881# CONFIG_MFD_WM8400 is not set 927# CONFIG_MFD_WM8400 is not set
928# CONFIG_MFD_WM831X is not set
882# CONFIG_MFD_WM8350_I2C is not set 929# CONFIG_MFD_WM8350_I2C is not set
930# CONFIG_MFD_PCF50633 is not set
931# CONFIG_MFD_MC13783 is not set
932# CONFIG_AB3100_CORE is not set
933# CONFIG_EZX_PCAP is not set
883# CONFIG_REGULATOR is not set 934# CONFIG_REGULATOR is not set
884 935# CONFIG_MEDIA_SUPPORT is not set
885#
886# Multimedia devices
887#
888
889#
890# Multimedia core support
891#
892# CONFIG_VIDEO_DEV is not set
893# CONFIG_DVB_CORE is not set
894# CONFIG_VIDEO_MEDIA is not set
895
896#
897# Multimedia drivers
898#
899CONFIG_DAB=y
900 936
901# 937#
902# Graphics support 938# Graphics support
@@ -928,24 +964,24 @@ CONFIG_FB_CFB_IMAGEBLIT=y
928# CONFIG_FB_BFIN_T350MCQB is not set 964# CONFIG_FB_BFIN_T350MCQB is not set
929# CONFIG_FB_BFIN_LQ035Q1 is not set 965# CONFIG_FB_BFIN_LQ035Q1 is not set
930CONFIG_FB_BF537_LQ035=y 966CONFIG_FB_BF537_LQ035=y
931CONFIG_LQ035_SLAVE_ADDR=0x58
932CONFIG_FB_BFIN_LANDSCAPE=y
933# CONFIG_FB_BFIN_BGR is not set
934# CONFIG_FB_BFIN_7393 is not set 967# CONFIG_FB_BFIN_7393 is not set
935# CONFIG_FB_HITACHI_TX09 is not set 968# CONFIG_FB_HITACHI_TX09 is not set
936# CONFIG_FB_S1D13XXX is not set 969# CONFIG_FB_S1D13XXX is not set
937# CONFIG_FB_VIRTUAL is not set 970# CONFIG_FB_VIRTUAL is not set
938# CONFIG_FB_METRONOME is not set 971# CONFIG_FB_METRONOME is not set
939# CONFIG_FB_MB862XX is not set 972# CONFIG_FB_MB862XX is not set
973# CONFIG_FB_BROADSHEET is not set
940CONFIG_BACKLIGHT_LCD_SUPPORT=y 974CONFIG_BACKLIGHT_LCD_SUPPORT=y
941CONFIG_LCD_CLASS_DEVICE=y 975CONFIG_LCD_CLASS_DEVICE=y
976# CONFIG_LCD_LMS283GF05 is not set
942# CONFIG_LCD_LTV350QV is not set 977# CONFIG_LCD_LTV350QV is not set
943# CONFIG_LCD_ILI9320 is not set 978# CONFIG_LCD_ILI9320 is not set
944# CONFIG_LCD_TDO24M is not set 979# CONFIG_LCD_TDO24M is not set
945# CONFIG_LCD_VGG2432A4 is not set 980# CONFIG_LCD_VGG2432A4 is not set
946# CONFIG_LCD_PLATFORM is not set 981# CONFIG_LCD_PLATFORM is not set
947CONFIG_BACKLIGHT_CLASS_DEVICE=y 982CONFIG_BACKLIGHT_CLASS_DEVICE=y
948# CONFIG_BACKLIGHT_CORGI is not set 983CONFIG_BACKLIGHT_GENERIC=y
984# CONFIG_BACKLIGHT_ADP8870 is not set
949 985
950# 986#
951# Display device support 987# Display device support
@@ -954,6 +990,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
954# CONFIG_LOGO is not set 990# CONFIG_LOGO is not set
955CONFIG_SOUND=y 991CONFIG_SOUND=y
956CONFIG_SOUND_OSS_CORE=y 992CONFIG_SOUND_OSS_CORE=y
993CONFIG_SOUND_OSS_CORE_PRECLAIM=y
957CONFIG_SND=m 994CONFIG_SND=m
958# CONFIG_SND_SEQUENCER is not set 995# CONFIG_SND_SEQUENCER is not set
959# CONFIG_SND_MIXER_OSS is not set 996# CONFIG_SND_MIXER_OSS is not set
@@ -963,6 +1000,11 @@ CONFIG_SND=m
963# CONFIG_SND_VERBOSE_PROCFS is not set 1000# CONFIG_SND_VERBOSE_PROCFS is not set
964# CONFIG_SND_VERBOSE_PRINTK is not set 1001# CONFIG_SND_VERBOSE_PRINTK is not set
965# CONFIG_SND_DEBUG is not set 1002# CONFIG_SND_DEBUG is not set
1003# CONFIG_SND_RAWMIDI_SEQ is not set
1004# CONFIG_SND_OPL3_LIB_SEQ is not set
1005# CONFIG_SND_OPL4_LIB_SEQ is not set
1006# CONFIG_SND_SBAWE_SEQ is not set
1007# CONFIG_SND_EMU10K1_SEQ is not set
966CONFIG_SND_DRIVERS=y 1008CONFIG_SND_DRIVERS=y
967# CONFIG_SND_DUMMY is not set 1009# CONFIG_SND_DUMMY is not set
968# CONFIG_SND_MTPAV is not set 1010# CONFIG_SND_MTPAV is not set
@@ -973,7 +1015,6 @@ CONFIG_SND_SPI=y
973# 1015#
974# ALSA Blackfin devices 1016# ALSA Blackfin devices
975# 1017#
976# CONFIG_SND_BLACKFIN_AD1836 is not set
977# CONFIG_SND_BFIN_AD73322 is not set 1018# CONFIG_SND_BFIN_AD73322 is not set
978# CONFIG_SND_SOC is not set 1019# CONFIG_SND_SOC is not set
979CONFIG_SOUND_PRIME=y 1020CONFIG_SOUND_PRIME=y
@@ -993,9 +1034,13 @@ CONFIG_USB_ARCH_HAS_HCD=y
993# 1034#
994 1035
995# 1036#
996# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; 1037# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
997# 1038#
998# CONFIG_USB_GADGET is not set 1039# CONFIG_USB_GADGET is not set
1040
1041#
1042# OTG and related infrastructure
1043#
999# CONFIG_MMC is not set 1044# CONFIG_MMC is not set
1000# CONFIG_MEMSTICK is not set 1045# CONFIG_MEMSTICK is not set
1001# CONFIG_NEW_LEDS is not set 1046# CONFIG_NEW_LEDS is not set
@@ -1031,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y
1031# CONFIG_RTC_DRV_S35390A is not set 1076# CONFIG_RTC_DRV_S35390A is not set
1032# CONFIG_RTC_DRV_FM3130 is not set 1077# CONFIG_RTC_DRV_FM3130 is not set
1033# CONFIG_RTC_DRV_RX8581 is not set 1078# CONFIG_RTC_DRV_RX8581 is not set
1079# CONFIG_RTC_DRV_RX8025 is not set
1034 1080
1035# 1081#
1036# SPI RTC drivers 1082# SPI RTC drivers
@@ -1042,6 +1088,7 @@ CONFIG_RTC_INTF_DEV=y
1042# CONFIG_RTC_DRV_R9701 is not set 1088# CONFIG_RTC_DRV_R9701 is not set
1043# CONFIG_RTC_DRV_RS5C348 is not set 1089# CONFIG_RTC_DRV_RS5C348 is not set
1044# CONFIG_RTC_DRV_DS3234 is not set 1090# CONFIG_RTC_DRV_DS3234 is not set
1091# CONFIG_RTC_DRV_PCF2123 is not set
1045 1092
1046# 1093#
1047# Platform RTC drivers 1094# Platform RTC drivers
@@ -1062,10 +1109,21 @@ CONFIG_RTC_INTF_DEV=y
1062# 1109#
1063CONFIG_RTC_DRV_BFIN=y 1110CONFIG_RTC_DRV_BFIN=y
1064# CONFIG_DMADEVICES is not set 1111# CONFIG_DMADEVICES is not set
1112# CONFIG_AUXDISPLAY is not set
1065# CONFIG_UIO is not set 1113# CONFIG_UIO is not set
1114
1115#
1116# TI VLYNQ
1117#
1066# CONFIG_STAGING is not set 1118# CONFIG_STAGING is not set
1067 1119
1068# 1120#
1121# Firmware Drivers
1122#
1123# CONFIG_FIRMWARE_MEMMAP is not set
1124# CONFIG_SIGMA is not set
1125
1126#
1069# File systems 1127# File systems
1070# 1128#
1071CONFIG_EXT2_FS=y 1129CONFIG_EXT2_FS=y
@@ -1078,9 +1136,13 @@ CONFIG_FS_MBCACHE=y
1078# CONFIG_REISERFS_FS is not set 1136# CONFIG_REISERFS_FS is not set
1079# CONFIG_JFS_FS is not set 1137# CONFIG_JFS_FS is not set
1080# CONFIG_FS_POSIX_ACL is not set 1138# CONFIG_FS_POSIX_ACL is not set
1081CONFIG_FILE_LOCKING=y
1082# CONFIG_XFS_FS is not set 1139# CONFIG_XFS_FS is not set
1140# CONFIG_GFS2_FS is not set
1083# CONFIG_OCFS2_FS is not set 1141# CONFIG_OCFS2_FS is not set
1142# CONFIG_BTRFS_FS is not set
1143# CONFIG_NILFS2_FS is not set
1144CONFIG_FILE_LOCKING=y
1145CONFIG_FSNOTIFY=y
1084# CONFIG_DNOTIFY is not set 1146# CONFIG_DNOTIFY is not set
1085CONFIG_INOTIFY=y 1147CONFIG_INOTIFY=y
1086CONFIG_INOTIFY_USER=y 1148CONFIG_INOTIFY_USER=y
@@ -1090,6 +1152,11 @@ CONFIG_INOTIFY_USER=y
1090# CONFIG_FUSE_FS is not set 1152# CONFIG_FUSE_FS is not set
1091 1153
1092# 1154#
1155# Caches
1156#
1157# CONFIG_FSCACHE is not set
1158
1159#
1093# CD-ROM/DVD Filesystems 1160# CD-ROM/DVD Filesystems
1094# 1161#
1095# CONFIG_ISO9660_FS is not set 1162# CONFIG_ISO9660_FS is not set
@@ -1108,13 +1175,9 @@ CONFIG_INOTIFY_USER=y
1108CONFIG_PROC_FS=y 1175CONFIG_PROC_FS=y
1109CONFIG_PROC_SYSCTL=y 1176CONFIG_PROC_SYSCTL=y
1110CONFIG_SYSFS=y 1177CONFIG_SYSFS=y
1111# CONFIG_TMPFS is not set
1112# CONFIG_HUGETLB_PAGE is not set 1178# CONFIG_HUGETLB_PAGE is not set
1113# CONFIG_CONFIGFS_FS is not set 1179# CONFIG_CONFIGFS_FS is not set
1114 1180CONFIG_MISC_FILESYSTEMS=y
1115#
1116# Miscellaneous filesystems
1117#
1118# CONFIG_ADFS_FS is not set 1181# CONFIG_ADFS_FS is not set
1119# CONFIG_AFFS_FS is not set 1182# CONFIG_AFFS_FS is not set
1120# CONFIG_HFS_FS is not set 1183# CONFIG_HFS_FS is not set
@@ -1123,17 +1186,8 @@ CONFIG_SYSFS=y
1123# CONFIG_BFS_FS is not set 1186# CONFIG_BFS_FS is not set
1124# CONFIG_EFS_FS is not set 1187# CONFIG_EFS_FS is not set
1125# CONFIG_JFFS2_FS is not set 1188# CONFIG_JFFS2_FS is not set
1126CONFIG_YAFFS_FS=y
1127CONFIG_YAFFS_YAFFS1=y
1128# CONFIG_YAFFS_9BYTE_TAGS is not set
1129# CONFIG_YAFFS_DOES_ECC is not set
1130CONFIG_YAFFS_YAFFS2=y
1131CONFIG_YAFFS_AUTO_YAFFS2=y
1132# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
1133# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
1134# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
1135CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
1136# CONFIG_CRAMFS is not set 1189# CONFIG_CRAMFS is not set
1190# CONFIG_SQUASHFS is not set
1137# CONFIG_VXFS_FS is not set 1191# CONFIG_VXFS_FS is not set
1138# CONFIG_MINIX_FS is not set 1192# CONFIG_MINIX_FS is not set
1139# CONFIG_OMFS_FS is not set 1193# CONFIG_OMFS_FS is not set
@@ -1152,7 +1206,6 @@ CONFIG_LOCKD=m
1152CONFIG_LOCKD_V4=y 1206CONFIG_LOCKD_V4=y
1153CONFIG_NFS_COMMON=y 1207CONFIG_NFS_COMMON=y
1154CONFIG_SUNRPC=m 1208CONFIG_SUNRPC=m
1155# CONFIG_SUNRPC_REGISTER_V4 is not set
1156# CONFIG_RPCSEC_GSS_KRB5 is not set 1209# CONFIG_RPCSEC_GSS_KRB5 is not set
1157# CONFIG_RPCSEC_GSS_SPKM3 is not set 1210# CONFIG_RPCSEC_GSS_SPKM3 is not set
1158CONFIG_SMB_FS=m 1211CONFIG_SMB_FS=m
@@ -1217,18 +1270,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1217CONFIG_ENABLE_MUST_CHECK=y 1270CONFIG_ENABLE_MUST_CHECK=y
1218CONFIG_FRAME_WARN=1024 1271CONFIG_FRAME_WARN=1024
1219# CONFIG_MAGIC_SYSRQ is not set 1272# CONFIG_MAGIC_SYSRQ is not set
1273# CONFIG_STRIP_ASM_SYMS is not set
1220# CONFIG_UNUSED_SYMBOLS is not set 1274# CONFIG_UNUSED_SYMBOLS is not set
1221# CONFIG_DEBUG_FS is not set 1275# CONFIG_DEBUG_FS is not set
1222# CONFIG_HEADERS_CHECK is not set 1276# CONFIG_HEADERS_CHECK is not set
1277CONFIG_DEBUG_SECTION_MISMATCH=y
1223# CONFIG_DEBUG_KERNEL is not set 1278# CONFIG_DEBUG_KERNEL is not set
1224# CONFIG_DEBUG_BUGVERBOSE is not set 1279# CONFIG_DEBUG_BUGVERBOSE is not set
1225# CONFIG_DEBUG_MEMORY_INIT is not set 1280# CONFIG_DEBUG_MEMORY_INIT is not set
1226# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1281# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1227 1282CONFIG_HAVE_FUNCTION_TRACER=y
1228# 1283CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1229# Tracers 1284CONFIG_TRACING_SUPPORT=y
1230# 1285# CONFIG_FTRACE is not set
1231# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
1232# CONFIG_SAMPLES is not set 1286# CONFIG_SAMPLES is not set
1233CONFIG_HAVE_ARCH_KGDB=y 1287CONFIG_HAVE_ARCH_KGDB=y
1234CONFIG_DEBUG_VERBOSE=y 1288CONFIG_DEBUG_VERBOSE=y
@@ -1245,6 +1299,7 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
1245# CONFIG_EARLY_PRINTK is not set 1299# CONFIG_EARLY_PRINTK is not set
1246# CONFIG_CPLB_INFO is not set 1300# CONFIG_CPLB_INFO is not set
1247# CONFIG_ACCESS_CHECK is not set 1301# CONFIG_ACCESS_CHECK is not set
1302# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1248 1303
1249# 1304#
1250# Security options 1305# Security options
@@ -1253,14 +1308,14 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
1253CONFIG_SECURITY=y 1308CONFIG_SECURITY=y
1254# CONFIG_SECURITYFS is not set 1309# CONFIG_SECURITYFS is not set
1255# CONFIG_SECURITY_NETWORK is not set 1310# CONFIG_SECURITY_NETWORK is not set
1311# CONFIG_SECURITY_PATH is not set
1256# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1312# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1257CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 1313# CONFIG_SECURITY_TOMOYO is not set
1258CONFIG_CRYPTO=y 1314CONFIG_CRYPTO=y
1259 1315
1260# 1316#
1261# Crypto core or helper 1317# Crypto core or helper
1262# 1318#
1263# CONFIG_CRYPTO_FIPS is not set
1264# CONFIG_CRYPTO_MANAGER is not set 1319# CONFIG_CRYPTO_MANAGER is not set
1265# CONFIG_CRYPTO_MANAGER2 is not set 1320# CONFIG_CRYPTO_MANAGER2 is not set
1266# CONFIG_CRYPTO_GF128MUL is not set 1321# CONFIG_CRYPTO_GF128MUL is not set
@@ -1292,11 +1347,13 @@ CONFIG_CRYPTO=y
1292# 1347#
1293# CONFIG_CRYPTO_HMAC is not set 1348# CONFIG_CRYPTO_HMAC is not set
1294# CONFIG_CRYPTO_XCBC is not set 1349# CONFIG_CRYPTO_XCBC is not set
1350# CONFIG_CRYPTO_VMAC is not set
1295 1351
1296# 1352#
1297# Digest 1353# Digest
1298# 1354#
1299# CONFIG_CRYPTO_CRC32C is not set 1355# CONFIG_CRYPTO_CRC32C is not set
1356# CONFIG_CRYPTO_GHASH is not set
1300# CONFIG_CRYPTO_MD4 is not set 1357# CONFIG_CRYPTO_MD4 is not set
1301# CONFIG_CRYPTO_MD5 is not set 1358# CONFIG_CRYPTO_MD5 is not set
1302# CONFIG_CRYPTO_MICHAEL_MIC is not set 1359# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1333,6 +1390,7 @@ CONFIG_CRYPTO=y
1333# Compression 1390# Compression
1334# 1391#
1335# CONFIG_CRYPTO_DEFLATE is not set 1392# CONFIG_CRYPTO_DEFLATE is not set
1393# CONFIG_CRYPTO_ZLIB is not set
1336# CONFIG_CRYPTO_LZO is not set 1394# CONFIG_CRYPTO_LZO is not set
1337 1395
1338# 1396#
@@ -1340,11 +1398,13 @@ CONFIG_CRYPTO=y
1340# 1398#
1341# CONFIG_CRYPTO_ANSI_CPRNG is not set 1399# CONFIG_CRYPTO_ANSI_CPRNG is not set
1342CONFIG_CRYPTO_HW=y 1400CONFIG_CRYPTO_HW=y
1401# CONFIG_BINARY_PRINTF is not set
1343 1402
1344# 1403#
1345# Library routines 1404# Library routines
1346# 1405#
1347CONFIG_BITREVERSE=y 1406CONFIG_BITREVERSE=y
1407CONFIG_GENERIC_FIND_LAST_BIT=y
1348CONFIG_CRC_CCITT=m 1408CONFIG_CRC_CCITT=m
1349# CONFIG_CRC16 is not set 1409# CONFIG_CRC16 is not set
1350# CONFIG_CRC_T10DIF is not set 1410# CONFIG_CRC_T10DIF is not set
@@ -1356,3 +1416,4 @@ CONFIG_ZLIB_INFLATE=y
1356CONFIG_HAS_IOMEM=y 1416CONFIG_HAS_IOMEM=y
1357CONFIG_HAS_IOPORT=y 1417CONFIG_HAS_IOPORT=y
1358CONFIG_HAS_DMA=y 1418CONFIG_HAS_DMA=y
1419CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
new file mode 100644
index 000000000000..4d31e2a4ed46
--- /dev/null
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -0,0 +1,1375 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.32.3
4#
5# CONFIG_MMU is not set
6# CONFIG_FPU is not set
7CONFIG_RWSEM_GENERIC_SPINLOCK=y
8# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
9CONFIG_BLACKFIN=y
10CONFIG_GENERIC_CSUM=y
11CONFIG_GENERIC_BUG=y
12CONFIG_ZONE_DMA=y
13CONFIG_GENERIC_FIND_NEXT_BIT=y
14CONFIG_GENERIC_HARDIRQS=y
15CONFIG_GENERIC_IRQ_PROBE=y
16CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
17CONFIG_GENERIC_GPIO=y
18CONFIG_FORCE_MAX_ZONEORDER=14
19CONFIG_GENERIC_CALIBRATE_DELAY=y
20CONFIG_LOCKDEP_SUPPORT=y
21CONFIG_STACKTRACE_SUPPORT=y
22CONFIG_TRACE_IRQFLAGS_SUPPORT=y
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y
25
26#
27# General setup
28#
29CONFIG_EXPERIMENTAL=y
30CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION=""
33CONFIG_LOCALVERSION_AUTO=y
34CONFIG_HAVE_KERNEL_GZIP=y
35CONFIG_HAVE_KERNEL_BZIP2=y
36CONFIG_HAVE_KERNEL_LZMA=y
37# CONFIG_KERNEL_GZIP is not set
38# CONFIG_KERNEL_BZIP2 is not set
39CONFIG_KERNEL_LZMA=y
40CONFIG_SYSVIPC=y
41CONFIG_SYSVIPC_SYSCTL=y
42# CONFIG_POSIX_MQUEUE is not set
43# CONFIG_BSD_PROCESS_ACCT is not set
44# CONFIG_TASKSTATS is not set
45# CONFIG_AUDIT is not set
46
47#
48# RCU Subsystem
49#
50CONFIG_TREE_RCU=y
51# CONFIG_TREE_PREEMPT_RCU is not set
52# CONFIG_RCU_TRACE is not set
53CONFIG_RCU_FANOUT=32
54# CONFIG_RCU_FANOUT_EXACT is not set
55# CONFIG_TREE_RCU_TRACE is not set
56CONFIG_IKCONFIG=y
57CONFIG_IKCONFIG_PROC=y
58CONFIG_LOG_BUF_SHIFT=14
59# CONFIG_GROUP_SCHED is not set
60# CONFIG_CGROUPS is not set
61# CONFIG_SYSFS_DEPRECATED_V2 is not set
62# CONFIG_RELAY is not set
63# CONFIG_NAMESPACES is not set
64CONFIG_BLK_DEV_INITRD=y
65CONFIG_INITRAMFS_SOURCE=""
66# CONFIG_RD_GZIP is not set
67# CONFIG_RD_BZIP2 is not set
68CONFIG_RD_LZMA=y
69CONFIG_CC_OPTIMIZE_FOR_SIZE=y
70CONFIG_SYSCTL=y
71CONFIG_ANON_INODES=y
72CONFIG_EMBEDDED=y
73CONFIG_UID16=y
74# CONFIG_SYSCTL_SYSCALL is not set
75CONFIG_KALLSYMS=y
76# CONFIG_KALLSYMS_ALL is not set
77# CONFIG_KALLSYMS_EXTRA_PASS is not set
78CONFIG_HOTPLUG=y
79CONFIG_PRINTK=y
80CONFIG_BUG=y
81# CONFIG_ELF_CORE is not set
82CONFIG_BASE_FULL=y
83# CONFIG_FUTEX is not set
84CONFIG_EPOLL=y
85# CONFIG_SIGNALFD is not set
86# CONFIG_TIMERFD is not set
87# CONFIG_EVENTFD is not set
88# CONFIG_AIO is not set
89
90#
91# Kernel Performance Events And Counters
92#
93CONFIG_VM_EVENT_COUNTERS=y
94CONFIG_COMPAT_BRK=y
95CONFIG_SLAB=y
96# CONFIG_SLUB is not set
97# CONFIG_SLOB is not set
98CONFIG_MMAP_ALLOW_UNINITIALIZED=y
99# CONFIG_PROFILING is not set
100CONFIG_HAVE_OPROFILE=y
101
102#
103# GCOV-based kernel profiling
104#
105# CONFIG_GCOV_KERNEL is not set
106# CONFIG_SLOW_WORK is not set
107# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
108CONFIG_SLABINFO=y
109CONFIG_BASE_SMALL=0
110CONFIG_MODULES=y
111# CONFIG_MODULE_FORCE_LOAD is not set
112CONFIG_MODULE_UNLOAD=y
113# CONFIG_MODULE_FORCE_UNLOAD is not set
114# CONFIG_MODVERSIONS is not set
115# CONFIG_MODULE_SRCVERSION_ALL is not set
116CONFIG_BLOCK=y
117# CONFIG_LBDAF is not set
118# CONFIG_BLK_DEV_BSG is not set
119# CONFIG_BLK_DEV_INTEGRITY is not set
120
121#
122# IO Schedulers
123#
124CONFIG_IOSCHED_NOOP=y
125# CONFIG_IOSCHED_AS is not set
126# CONFIG_IOSCHED_DEADLINE is not set
127# CONFIG_IOSCHED_CFQ is not set
128# CONFIG_DEFAULT_AS is not set
129# CONFIG_DEFAULT_DEADLINE is not set
130# CONFIG_DEFAULT_CFQ is not set
131CONFIG_DEFAULT_NOOP=y
132CONFIG_DEFAULT_IOSCHED="noop"
133# CONFIG_PREEMPT_NONE is not set
134CONFIG_PREEMPT_VOLUNTARY=y
135# CONFIG_PREEMPT is not set
136# CONFIG_FREEZER is not set
137
138#
139# Blackfin Processor Options
140#
141
142#
143# Processor and Board Settings
144#
145# CONFIG_BF512 is not set
146# CONFIG_BF514 is not set
147# CONFIG_BF516 is not set
148CONFIG_BF518=y
149# CONFIG_BF522 is not set
150# CONFIG_BF523 is not set
151# CONFIG_BF524 is not set
152# CONFIG_BF525 is not set
153# CONFIG_BF526 is not set
154# CONFIG_BF527 is not set
155# CONFIG_BF531 is not set
156# CONFIG_BF532 is not set
157# CONFIG_BF533 is not set
158# CONFIG_BF534 is not set
159# CONFIG_BF536 is not set
160# CONFIG_BF537 is not set
161# CONFIG_BF538 is not set
162# CONFIG_BF539 is not set
163# CONFIG_BF542_std is not set
164# CONFIG_BF542M is not set
165# CONFIG_BF544_std is not set
166# CONFIG_BF544M is not set
167# CONFIG_BF547_std is not set
168# CONFIG_BF547M is not set
169# CONFIG_BF548_std is not set
170# CONFIG_BF548M is not set
171# CONFIG_BF549_std is not set
172# CONFIG_BF549M is not set
173# CONFIG_BF561 is not set
174CONFIG_BF_REV_MIN=0
175CONFIG_BF_REV_MAX=2
176# CONFIG_BF_REV_0_0 is not set
177CONFIG_BF_REV_0_1=y
178# CONFIG_BF_REV_0_2 is not set
179# CONFIG_BF_REV_0_3 is not set
180# CONFIG_BF_REV_0_4 is not set
181# CONFIG_BF_REV_0_5 is not set
182# CONFIG_BF_REV_0_6 is not set
183# CONFIG_BF_REV_ANY is not set
184# CONFIG_BF_REV_NONE is not set
185CONFIG_BF51x=y
186# CONFIG_BFIN518F_EZBRD is not set
187CONFIG_BFIN518F_TCM=y
188
189#
190# BF518 Specific Configuration
191#
192
193#
194# Alternative Multiplexing Scheme
195#
196# CONFIG_BF518_SPORT0_PORTF is not set
197CONFIG_BF518_SPORT0_PORTG=y
198CONFIG_BF518_SPORT0_TSCLK_PG10=y
199# CONFIG_BF518_SPORT0_TSCLK_PG14 is not set
200CONFIG_BF518_UART1_PORTF=y
201# CONFIG_BF518_UART1_PORTG is not set
202
203#
204# Interrupt Priority Assignment
205#
206
207#
208# Priority
209#
210CONFIG_IRQ_PLL_WAKEUP=7
211CONFIG_IRQ_DMA0_ERROR=7
212CONFIG_IRQ_DMAR0_BLK=7
213CONFIG_IRQ_DMAR1_BLK=7
214CONFIG_IRQ_DMAR0_OVR=7
215CONFIG_IRQ_DMAR1_OVR=7
216CONFIG_IRQ_PPI_ERROR=7
217CONFIG_IRQ_MAC_ERROR=7
218CONFIG_IRQ_SPORT0_ERROR=7
219CONFIG_IRQ_SPORT1_ERROR=7
220CONFIG_IRQ_PTP_ERROR=7
221CONFIG_IRQ_UART0_ERROR=7
222CONFIG_IRQ_UART1_ERROR=7
223CONFIG_IRQ_RTC=8
224CONFIG_IRQ_PPI=8
225CONFIG_IRQ_SPORT0_RX=9
226CONFIG_IRQ_SPORT0_TX=9
227CONFIG_IRQ_SPORT1_RX=9
228CONFIG_IRQ_SPORT1_TX=9
229CONFIG_IRQ_TWI=10
230CONFIG_IRQ_SPI0=10
231CONFIG_IRQ_UART0_RX=10
232CONFIG_IRQ_UART0_TX=10
233CONFIG_IRQ_UART1_RX=10
234CONFIG_IRQ_UART1_TX=10
235CONFIG_IRQ_OPTSEC=11
236CONFIG_IRQ_CNT=11
237CONFIG_IRQ_MAC_RX=11
238CONFIG_IRQ_PORTH_INTA=11
239CONFIG_IRQ_MAC_TX=11
240CONFIG_IRQ_PORTH_INTB=11
241CONFIG_IRQ_TIMER0=12
242CONFIG_IRQ_TIMER1=12
243CONFIG_IRQ_TIMER2=12
244CONFIG_IRQ_TIMER3=12
245CONFIG_IRQ_TIMER4=12
246CONFIG_IRQ_TIMER5=12
247CONFIG_IRQ_TIMER6=12
248CONFIG_IRQ_TIMER7=12
249CONFIG_IRQ_PORTG_INTA=12
250CONFIG_IRQ_PORTG_INTB=12
251CONFIG_IRQ_MEM_DMA0=13
252CONFIG_IRQ_MEM_DMA1=13
253CONFIG_IRQ_WATCH=13
254CONFIG_IRQ_PORTF_INTA=13
255CONFIG_IRQ_PORTF_INTB=13
256CONFIG_IRQ_SPI0_ERROR=7
257CONFIG_IRQ_SPI1_ERROR=7
258CONFIG_IRQ_RSI_INT0=7
259CONFIG_IRQ_RSI_INT1=7
260CONFIG_IRQ_PWM_TRIP=10
261CONFIG_IRQ_PWM_SYNC=10
262CONFIG_IRQ_PTP_STAT=10
263
264#
265# Board customizations
266#
267# CONFIG_CMDLINE_BOOL is not set
268CONFIG_BOOT_LOAD=0x1000
269
270#
271# Clock/PLL Setup
272#
273CONFIG_CLKIN_HZ=25000000
274# CONFIG_BFIN_KERNEL_CLOCK is not set
275CONFIG_MAX_VCO_HZ=400000000
276CONFIG_MIN_VCO_HZ=50000000
277CONFIG_MAX_SCLK_HZ=133333333
278CONFIG_MIN_SCLK_HZ=27000000
279
280#
281# Kernel Timer/Scheduler
282#
283# CONFIG_HZ_100 is not set
284CONFIG_HZ_250=y
285# CONFIG_HZ_300 is not set
286# CONFIG_HZ_1000 is not set
287CONFIG_HZ=250
288# CONFIG_SCHED_HRTICK is not set
289CONFIG_GENERIC_TIME=y
290CONFIG_GENERIC_CLOCKEVENTS=y
291
292#
293# Clock event device
294#
295# CONFIG_TICKSOURCE_GPTMR0 is not set
296CONFIG_TICKSOURCE_CORETMR=y
297
298#
299# Clock source
300#
301# CONFIG_CYCLES_CLOCKSOURCE is not set
302# CONFIG_GPTMR0_CLOCKSOURCE is not set
303# CONFIG_NO_HZ is not set
304# CONFIG_HIGH_RES_TIMERS is not set
305CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
306
307#
308# Misc
309#
310CONFIG_BFIN_SCRATCH_REG_RETN=y
311# CONFIG_BFIN_SCRATCH_REG_RETE is not set
312# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
313
314#
315# Blackfin Kernel Optimizations
316#
317
318#
319# Memory Optimizations
320#
321CONFIG_I_ENTRY_L1=y
322CONFIG_EXCPT_IRQ_SYSC_L1=y
323CONFIG_DO_IRQ_L1=y
324CONFIG_CORE_TIMER_IRQ_L1=y
325CONFIG_IDLE_L1=y
326# CONFIG_SCHEDULE_L1 is not set
327CONFIG_ARITHMETIC_OPS_L1=y
328CONFIG_ACCESS_OK_L1=y
329# CONFIG_MEMSET_L1 is not set
330# CONFIG_MEMCPY_L1 is not set
331# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set
332# CONFIG_IP_CHECKSUM_L1 is not set
333CONFIG_CACHELINE_ALIGNED_L1=y
334# CONFIG_SYSCALL_TAB_L1 is not set
335# CONFIG_CPLB_SWITCH_TAB_L1 is not set
336CONFIG_APP_STACK_L1=y
337
338#
339# Speed Optimizations
340#
341CONFIG_BFIN_INS_LOWOVERHEAD=y
342CONFIG_RAMKERNEL=y
343# CONFIG_ROMKERNEL is not set
344CONFIG_SELECT_MEMORY_MODEL=y
345CONFIG_FLATMEM_MANUAL=y
346# CONFIG_DISCONTIGMEM_MANUAL is not set
347# CONFIG_SPARSEMEM_MANUAL is not set
348CONFIG_FLATMEM=y
349CONFIG_FLAT_NODE_MEM_MAP=y
350CONFIG_PAGEFLAGS_EXTENDED=y
351CONFIG_SPLIT_PTLOCK_CPUS=4
352# CONFIG_PHYS_ADDR_T_64BIT is not set
353CONFIG_ZONE_DMA_FLAG=1
354CONFIG_VIRT_TO_BUS=y
355CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
356CONFIG_BFIN_GPTIMERS=m
357# CONFIG_DMA_UNCACHED_4M is not set
358# CONFIG_DMA_UNCACHED_2M is not set
359CONFIG_DMA_UNCACHED_1M=y
360# CONFIG_DMA_UNCACHED_512K is not set
361# CONFIG_DMA_UNCACHED_256K is not set
362# CONFIG_DMA_UNCACHED_128K is not set
363# CONFIG_DMA_UNCACHED_NONE is not set
364
365#
366# Cache Support
367#
368CONFIG_BFIN_ICACHE=y
369CONFIG_BFIN_EXTMEM_ICACHEABLE=y
370CONFIG_BFIN_DCACHE=y
371# CONFIG_BFIN_DCACHE_BANKA is not set
372CONFIG_BFIN_EXTMEM_DCACHEABLE=y
373CONFIG_BFIN_EXTMEM_WRITEBACK=y
374# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
375
376#
377# Memory Protection Unit
378#
379# CONFIG_MPU is not set
380
381#
382# Asynchronous Memory Configuration
383#
384
385#
386# EBIU_AMGCTL Global Control
387#
388CONFIG_C_AMCKEN=y
389CONFIG_C_CDPRIO=y
390# CONFIG_C_AMBEN is not set
391# CONFIG_C_AMBEN_B0 is not set
392# CONFIG_C_AMBEN_B0_B1 is not set
393# CONFIG_C_AMBEN_B0_B1_B2 is not set
394CONFIG_C_AMBEN_ALL=y
395
396#
397# EBIU_AMBCTL Control
398#
399CONFIG_BANK_0=0x7BB0
400CONFIG_BANK_1=0x7BB0
401CONFIG_BANK_2=0x7BB0
402CONFIG_BANK_3=0x99B2
403
404#
405# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
406#
407# CONFIG_ARCH_SUPPORTS_MSI is not set
408# CONFIG_PCCARD is not set
409
410#
411# Executable file formats
412#
413CONFIG_BINFMT_ELF_FDPIC=y
414CONFIG_BINFMT_FLAT=y
415CONFIG_BINFMT_ZFLAT=y
416# CONFIG_BINFMT_SHARED_FLAT is not set
417# CONFIG_HAVE_AOUT is not set
418# CONFIG_BINFMT_MISC is not set
419
420#
421# Power management options
422#
423# CONFIG_PM is not set
424CONFIG_ARCH_SUSPEND_POSSIBLE=y
425
426#
427# CPU Frequency scaling
428#
429# CONFIG_CPU_FREQ is not set
430CONFIG_NET=y
431
432#
433# Networking options
434#
435CONFIG_PACKET=y
436# CONFIG_PACKET_MMAP is not set
437CONFIG_UNIX=y
438# CONFIG_NET_KEY is not set
439CONFIG_INET=y
440# CONFIG_IP_MULTICAST is not set
441# CONFIG_IP_ADVANCED_ROUTER is not set
442CONFIG_IP_FIB_HASH=y
443CONFIG_IP_PNP=y
444# CONFIG_IP_PNP_DHCP is not set
445# CONFIG_IP_PNP_BOOTP is not set
446# CONFIG_IP_PNP_RARP is not set
447# CONFIG_NET_IPIP is not set
448# CONFIG_NET_IPGRE is not set
449# CONFIG_ARPD is not set
450# CONFIG_SYN_COOKIES is not set
451# CONFIG_INET_AH is not set
452# CONFIG_INET_ESP is not set
453# CONFIG_INET_IPCOMP is not set
454# CONFIG_INET_XFRM_TUNNEL is not set
455# CONFIG_INET_TUNNEL is not set
456# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
457# CONFIG_INET_XFRM_MODE_TUNNEL is not set
458# CONFIG_INET_XFRM_MODE_BEET is not set
459# CONFIG_INET_LRO is not set
460# CONFIG_INET_DIAG is not set
461# CONFIG_TCP_CONG_ADVANCED is not set
462CONFIG_TCP_CONG_CUBIC=y
463CONFIG_DEFAULT_TCP_CONG="cubic"
464# CONFIG_TCP_MD5SIG is not set
465# CONFIG_IPV6 is not set
466# CONFIG_NETLABEL is not set
467# CONFIG_NETWORK_SECMARK is not set
468# CONFIG_NETFILTER is not set
469# CONFIG_IP_DCCP is not set
470# CONFIG_IP_SCTP is not set
471# CONFIG_RDS is not set
472# CONFIG_TIPC is not set
473# CONFIG_ATM is not set
474# CONFIG_BRIDGE is not set
475# CONFIG_NET_DSA is not set
476# CONFIG_VLAN_8021Q is not set
477# CONFIG_DECNET is not set
478# CONFIG_LLC2 is not set
479# CONFIG_IPX is not set
480# CONFIG_ATALK is not set
481# CONFIG_X25 is not set
482# CONFIG_LAPB is not set
483# CONFIG_ECONET is not set
484# CONFIG_WAN_ROUTER is not set
485# CONFIG_PHONET is not set
486# CONFIG_IEEE802154 is not set
487# CONFIG_NET_SCHED is not set
488# CONFIG_DCB is not set
489
490#
491# Network testing
492#
493# CONFIG_NET_PKTGEN is not set
494# CONFIG_HAMRADIO is not set
495# CONFIG_CAN is not set
496# CONFIG_IRDA is not set
497# CONFIG_BT is not set
498# CONFIG_AF_RXRPC is not set
499# CONFIG_WIRELESS is not set
500# CONFIG_WIMAX is not set
501# CONFIG_RFKILL is not set
502# CONFIG_NET_9P is not set
503
504#
505# Device Drivers
506#
507
508#
509# Generic Driver Options
510#
511CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
512CONFIG_STANDALONE=y
513CONFIG_PREVENT_FIRMWARE_BUILD=y
514# CONFIG_FW_LOADER is not set
515# CONFIG_DEBUG_DRIVER is not set
516# CONFIG_DEBUG_DEVRES is not set
517# CONFIG_SYS_HYPERVISOR is not set
518# CONFIG_CONNECTOR is not set
519CONFIG_MTD=y
520# CONFIG_MTD_DEBUG is not set
521# CONFIG_MTD_TESTS is not set
522# CONFIG_MTD_CONCAT is not set
523CONFIG_MTD_PARTITIONS=y
524# CONFIG_MTD_REDBOOT_PARTS is not set
525CONFIG_MTD_CMDLINE_PARTS=y
526# CONFIG_MTD_AR7_PARTS is not set
527
528#
529# User Modules And Translation Layers
530#
531CONFIG_MTD_CHAR=y
532CONFIG_MTD_BLKDEVS=y
533CONFIG_MTD_BLOCK=y
534# CONFIG_FTL is not set
535# CONFIG_NFTL is not set
536# CONFIG_INFTL is not set
537# CONFIG_RFD_FTL is not set
538# CONFIG_SSFDC is not set
539# CONFIG_MTD_OOPS is not set
540
541#
542# RAM/ROM/Flash chip drivers
543#
544CONFIG_MTD_CFI=y
545# CONFIG_MTD_JEDECPROBE is not set
546CONFIG_MTD_GEN_PROBE=y
547CONFIG_MTD_CFI_ADV_OPTIONS=y
548CONFIG_MTD_CFI_NOSWAP=y
549# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
550# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
551CONFIG_MTD_CFI_GEOMETRY=y
552# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
553CONFIG_MTD_MAP_BANK_WIDTH_2=y
554# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
555# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
556# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
557# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
558CONFIG_MTD_CFI_I1=y
559# CONFIG_MTD_CFI_I2 is not set
560# CONFIG_MTD_CFI_I4 is not set
561# CONFIG_MTD_CFI_I8 is not set
562# CONFIG_MTD_OTP is not set
563CONFIG_MTD_CFI_INTELEXT=y
564# CONFIG_MTD_CFI_AMDSTD is not set
565# CONFIG_MTD_CFI_STAA is not set
566CONFIG_MTD_CFI_UTIL=y
567CONFIG_MTD_RAM=y
568CONFIG_MTD_ROM=m
569# CONFIG_MTD_ABSENT is not set
570
571#
572# Mapping drivers for chip access
573#
574# CONFIG_MTD_COMPLEX_MAPPINGS is not set
575CONFIG_MTD_PHYSMAP=y
576# CONFIG_MTD_PHYSMAP_COMPAT is not set
577# CONFIG_MTD_UCLINUX is not set
578# CONFIG_MTD_PLATRAM is not set
579
580#
581# Self-contained MTD device drivers
582#
583# CONFIG_MTD_DATAFLASH is not set
584# CONFIG_MTD_M25P80 is not set
585# CONFIG_MTD_SST25L is not set
586# CONFIG_MTD_SLRAM is not set
587# CONFIG_MTD_PHRAM is not set
588# CONFIG_MTD_MTDRAM is not set
589# CONFIG_MTD_BLOCK2MTD is not set
590
591#
592# Disk-On-Chip Device Drivers
593#
594# CONFIG_MTD_DOC2000 is not set
595# CONFIG_MTD_DOC2001 is not set
596# CONFIG_MTD_DOC2001PLUS is not set
597# CONFIG_MTD_NAND is not set
598# CONFIG_MTD_ONENAND is not set
599
600#
601# LPDDR flash memory drivers
602#
603# CONFIG_MTD_LPDDR is not set
604
605#
606# UBI - Unsorted block images
607#
608# CONFIG_MTD_UBI is not set
609# CONFIG_PARPORT is not set
610CONFIG_BLK_DEV=y
611# CONFIG_BLK_DEV_COW_COMMON is not set
612# CONFIG_BLK_DEV_LOOP is not set
613# CONFIG_BLK_DEV_NBD is not set
614CONFIG_BLK_DEV_RAM=y
615CONFIG_BLK_DEV_RAM_COUNT=16
616CONFIG_BLK_DEV_RAM_SIZE=4096
617# CONFIG_BLK_DEV_XIP is not set
618# CONFIG_CDROM_PKTCDVD is not set
619# CONFIG_ATA_OVER_ETH is not set
620# CONFIG_BLK_DEV_HD is not set
621CONFIG_MISC_DEVICES=y
622# CONFIG_AD525X_DPOT is not set
623# CONFIG_ICS932S401 is not set
624# CONFIG_ENCLOSURE_SERVICES is not set
625# CONFIG_ISL29003 is not set
626# CONFIG_C2PORT is not set
627
628#
629# EEPROM support
630#
631# CONFIG_EEPROM_AT24 is not set
632# CONFIG_EEPROM_AT25 is not set
633# CONFIG_EEPROM_LEGACY is not set
634# CONFIG_EEPROM_MAX6875 is not set
635# CONFIG_EEPROM_93CX6 is not set
636CONFIG_HAVE_IDE=y
637# CONFIG_IDE is not set
638
639#
640# SCSI device support
641#
642# CONFIG_RAID_ATTRS is not set
643# CONFIG_SCSI is not set
644# CONFIG_SCSI_DMA is not set
645# CONFIG_SCSI_NETLINK is not set
646# CONFIG_ATA is not set
647# CONFIG_MD is not set
648CONFIG_NETDEVICES=y
649# CONFIG_DUMMY is not set
650# CONFIG_BONDING is not set
651# CONFIG_MACVLAN is not set
652# CONFIG_EQUALIZER is not set
653# CONFIG_TUN is not set
654# CONFIG_VETH is not set
655CONFIG_PHYLIB=y
656
657#
658# MII PHY device drivers
659#
660# CONFIG_MARVELL_PHY is not set
661# CONFIG_DAVICOM_PHY is not set
662# CONFIG_QSEMI_PHY is not set
663# CONFIG_LXT_PHY is not set
664# CONFIG_CICADA_PHY is not set
665# CONFIG_VITESSE_PHY is not set
666# CONFIG_SMSC_PHY is not set
667# CONFIG_BROADCOM_PHY is not set
668# CONFIG_ICPLUS_PHY is not set
669# CONFIG_REALTEK_PHY is not set
670# CONFIG_NATIONAL_PHY is not set
671# CONFIG_STE10XP is not set
672# CONFIG_LSI_ET1011C_PHY is not set
673# CONFIG_FIXED_PHY is not set
674# CONFIG_MDIO_BITBANG is not set
675CONFIG_NET_ETHERNET=y
676CONFIG_MII=y
677CONFIG_BFIN_MAC=y
678CONFIG_BFIN_TX_DESC_NUM=10
679CONFIG_BFIN_RX_DESC_NUM=20
680# CONFIG_BFIN_MAC_RMII is not set
681CONFIG_BFIN_MAC_USE_HWSTAMP=y
682# CONFIG_SMC91X is not set
683# CONFIG_DM9000 is not set
684# CONFIG_ENC28J60 is not set
685# CONFIG_ETHOC is not set
686# CONFIG_SMSC911X is not set
687# CONFIG_DNET is not set
688# CONFIG_ADF702X is not set
689# CONFIG_IBM_NEW_EMAC_ZMII is not set
690# CONFIG_IBM_NEW_EMAC_RGMII is not set
691# CONFIG_IBM_NEW_EMAC_TAH is not set
692# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
693# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
694# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
695# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
696# CONFIG_B44 is not set
697# CONFIG_KS8842 is not set
698# CONFIG_KS8851 is not set
699# CONFIG_KS8851_MLL is not set
700# CONFIG_NETDEV_1000 is not set
701# CONFIG_NETDEV_10000 is not set
702# CONFIG_WLAN is not set
703
704#
705# Enable WiMAX (Networking options) to see the WiMAX drivers
706#
707# CONFIG_WAN is not set
708# CONFIG_PPP is not set
709# CONFIG_SLIP is not set
710# CONFIG_NETCONSOLE is not set
711# CONFIG_NETPOLL is not set
712# CONFIG_NET_POLL_CONTROLLER is not set
713# CONFIG_ISDN is not set
714# CONFIG_PHONE is not set
715
716#
717# Input device support
718#
719CONFIG_INPUT=y
720# CONFIG_INPUT_FF_MEMLESS is not set
721# CONFIG_INPUT_POLLDEV is not set
722
723#
724# Userland interfaces
725#
726# CONFIG_INPUT_MOUSEDEV is not set
727# CONFIG_INPUT_JOYDEV is not set
728# CONFIG_INPUT_EVDEV is not set
729# CONFIG_INPUT_EVBUG is not set
730
731#
732# Input Device Drivers
733#
734# CONFIG_INPUT_KEYBOARD is not set
735# CONFIG_INPUT_MOUSE is not set
736# CONFIG_INPUT_JOYSTICK is not set
737# CONFIG_INPUT_TABLET is not set
738# CONFIG_INPUT_TOUCHSCREEN is not set
739CONFIG_INPUT_MISC=y
740# CONFIG_INPUT_UINPUT is not set
741# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
742# CONFIG_INPUT_AD714X is not set
743# CONFIG_INPUT_ADXL34X is not set
744# CONFIG_INPUT_PCF8574 is not set
745
746#
747# Hardware I/O ports
748#
749# CONFIG_SERIO is not set
750# CONFIG_GAMEPORT is not set
751
752#
753# Character devices
754#
755CONFIG_BFIN_DMA_INTERFACE=m
756# CONFIG_BFIN_PPI is not set
757# CONFIG_BFIN_PPIFCD is not set
758# CONFIG_BFIN_SIMPLE_TIMER is not set
759# CONFIG_BFIN_SPI_ADC is not set
760# CONFIG_BFIN_SPORT is not set
761# CONFIG_BFIN_TWI_LCD is not set
762CONFIG_VT=y
763CONFIG_CONSOLE_TRANSLATIONS=y
764CONFIG_VT_CONSOLE=y
765CONFIG_HW_CONSOLE=y
766# CONFIG_VT_HW_CONSOLE_BINDING is not set
767# CONFIG_DEVKMEM is not set
768CONFIG_BFIN_JTAG_COMM=m
769# CONFIG_SERIAL_NONSTANDARD is not set
770
771#
772# Serial drivers
773#
774# CONFIG_SERIAL_8250 is not set
775
776#
777# Non-8250 serial port support
778#
779# CONFIG_SERIAL_MAX3100 is not set
780CONFIG_SERIAL_BFIN=y
781CONFIG_SERIAL_BFIN_CONSOLE=y
782CONFIG_SERIAL_BFIN_DMA=y
783# CONFIG_SERIAL_BFIN_PIO is not set
784CONFIG_SERIAL_BFIN_UART0=y
785# CONFIG_BFIN_UART0_CTSRTS is not set
786# CONFIG_SERIAL_BFIN_UART1 is not set
787CONFIG_SERIAL_CORE=y
788CONFIG_SERIAL_CORE_CONSOLE=y
789# CONFIG_SERIAL_BFIN_SPORT is not set
790CONFIG_UNIX98_PTYS=y
791# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
792# CONFIG_LEGACY_PTYS is not set
793CONFIG_BFIN_OTP=y
794# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
795# CONFIG_IPMI_HANDLER is not set
796# CONFIG_HW_RANDOM is not set
797# CONFIG_R3964 is not set
798# CONFIG_RAW_DRIVER is not set
799# CONFIG_TCG_TPM is not set
800CONFIG_I2C=y
801CONFIG_I2C_BOARDINFO=y
802CONFIG_I2C_COMPAT=y
803CONFIG_I2C_CHARDEV=y
804CONFIG_I2C_HELPER_AUTO=y
805
806#
807# I2C Hardware Bus support
808#
809
810#
811# I2C system bus drivers (mostly embedded / system-on-chip)
812#
813CONFIG_I2C_BLACKFIN_TWI=y
814CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
815# CONFIG_I2C_GPIO is not set
816# CONFIG_I2C_OCORES is not set
817# CONFIG_I2C_SIMTEC is not set
818
819#
820# External I2C/SMBus adapter drivers
821#
822# CONFIG_I2C_PARPORT_LIGHT is not set
823# CONFIG_I2C_TAOS_EVM is not set
824
825#
826# Other I2C/SMBus bus drivers
827#
828# CONFIG_I2C_PCA_PLATFORM is not set
829# CONFIG_I2C_STUB is not set
830
831#
832# Miscellaneous I2C Chip support
833#
834# CONFIG_DS1682 is not set
835# CONFIG_SENSORS_TSL2550 is not set
836# CONFIG_I2C_DEBUG_CORE is not set
837# CONFIG_I2C_DEBUG_ALGO is not set
838# CONFIG_I2C_DEBUG_BUS is not set
839# CONFIG_I2C_DEBUG_CHIP is not set
840CONFIG_SPI=y
841# CONFIG_SPI_DEBUG is not set
842CONFIG_SPI_MASTER=y
843
844#
845# SPI Master Controller Drivers
846#
847CONFIG_SPI_BFIN=y
848CONFIG_SPI_BFIN_LOCK=y
849# CONFIG_SPI_BFIN_SPORT is not set
850# CONFIG_SPI_BITBANG is not set
851# CONFIG_SPI_GPIO is not set
852
853#
854# SPI Protocol Masters
855#
856# CONFIG_SPI_SPIDEV is not set
857# CONFIG_SPI_TLE62X0 is not set
858
859#
860# PPS support
861#
862# CONFIG_PPS is not set
863CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
864CONFIG_GPIOLIB=y
865# CONFIG_DEBUG_GPIO is not set
866CONFIG_GPIO_SYSFS=y
867
868#
869# Memory mapped GPIO expanders:
870#
871
872#
873# I2C GPIO expanders:
874#
875# CONFIG_GPIO_MAX732X is not set
876# CONFIG_GPIO_PCA953X is not set
877# CONFIG_GPIO_PCF857X is not set
878# CONFIG_GPIO_ADP5588 is not set
879
880#
881# PCI GPIO expanders:
882#
883
884#
885# SPI GPIO expanders:
886#
887# CONFIG_GPIO_MAX7301 is not set
888# CONFIG_GPIO_MCP23S08 is not set
889# CONFIG_GPIO_MC33880 is not set
890
891#
892# AC97 GPIO expanders:
893#
894# CONFIG_W1 is not set
895# CONFIG_POWER_SUPPLY is not set
896# CONFIG_HWMON is not set
897# CONFIG_THERMAL is not set
898CONFIG_WATCHDOG=y
899# CONFIG_WATCHDOG_NOWAYOUT is not set
900
901#
902# Watchdog Device Drivers
903#
904# CONFIG_SOFT_WATCHDOG is not set
905CONFIG_BFIN_WDT=y
906CONFIG_SSB_POSSIBLE=y
907
908#
909# Sonics Silicon Backplane
910#
911# CONFIG_SSB is not set
912
913#
914# Multifunction device drivers
915#
916# CONFIG_MFD_CORE is not set
917# CONFIG_MFD_SM501 is not set
918# CONFIG_HTC_PASIC3 is not set
919# CONFIG_TPS65010 is not set
920# CONFIG_TWL4030_CORE is not set
921# CONFIG_MFD_TMIO is not set
922# CONFIG_PMIC_DA903X is not set
923# CONFIG_PMIC_ADP5520 is not set
924# CONFIG_MFD_WM8400 is not set
925# CONFIG_MFD_WM831X is not set
926# CONFIG_MFD_WM8350_I2C is not set
927# CONFIG_MFD_PCF50633 is not set
928# CONFIG_MFD_MC13783 is not set
929# CONFIG_AB3100_CORE is not set
930# CONFIG_EZX_PCAP is not set
931# CONFIG_REGULATOR is not set
932# CONFIG_MEDIA_SUPPORT is not set
933
934#
935# Graphics support
936#
937# CONFIG_VGASTATE is not set
938# CONFIG_VIDEO_OUTPUT_CONTROL is not set
939# CONFIG_FB is not set
940# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
941
942#
943# Display device support
944#
945# CONFIG_DISPLAY_SUPPORT is not set
946
947#
948# Console display driver support
949#
950CONFIG_DUMMY_CONSOLE=y
951# CONFIG_SOUND is not set
952# CONFIG_HID_SUPPORT is not set
953# CONFIG_USB_SUPPORT is not set
954CONFIG_MMC=y
955CONFIG_MMC_DEBUG=y
956# CONFIG_MMC_UNSAFE_RESUME is not set
957
958#
959# MMC/SD/SDIO Card Drivers
960#
961CONFIG_MMC_BLOCK=y
962CONFIG_MMC_BLOCK_BOUNCE=y
963# CONFIG_SDIO_UART is not set
964# CONFIG_MMC_TEST is not set
965
966#
967# MMC/SD/SDIO Host Controller Drivers
968#
969# CONFIG_MMC_SDHCI is not set
970# CONFIG_MMC_AT91 is not set
971# CONFIG_MMC_ATMELMCI is not set
972CONFIG_MMC_SPI=y
973# CONFIG_SDH_BFIN is not set
974# CONFIG_MEMSTICK is not set
975# CONFIG_NEW_LEDS is not set
976# CONFIG_ACCESSIBILITY is not set
977CONFIG_RTC_LIB=y
978CONFIG_RTC_CLASS=y
979CONFIG_RTC_HCTOSYS=y
980CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
981# CONFIG_RTC_DEBUG is not set
982
983#
984# RTC interfaces
985#
986CONFIG_RTC_INTF_SYSFS=y
987CONFIG_RTC_INTF_PROC=y
988CONFIG_RTC_INTF_DEV=y
989# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
990# CONFIG_RTC_DRV_TEST is not set
991
992#
993# I2C RTC drivers
994#
995# CONFIG_RTC_DRV_DS1307 is not set
996# CONFIG_RTC_DRV_DS1374 is not set
997# CONFIG_RTC_DRV_DS1672 is not set
998# CONFIG_RTC_DRV_MAX6900 is not set
999# CONFIG_RTC_DRV_RS5C372 is not set
1000# CONFIG_RTC_DRV_ISL1208 is not set
1001# CONFIG_RTC_DRV_X1205 is not set
1002# CONFIG_RTC_DRV_PCF8563 is not set
1003# CONFIG_RTC_DRV_PCF8583 is not set
1004# CONFIG_RTC_DRV_M41T80 is not set
1005# CONFIG_RTC_DRV_S35390A is not set
1006# CONFIG_RTC_DRV_FM3130 is not set
1007# CONFIG_RTC_DRV_RX8581 is not set
1008# CONFIG_RTC_DRV_RX8025 is not set
1009
1010#
1011# SPI RTC drivers
1012#
1013# CONFIG_RTC_DRV_M41T94 is not set
1014# CONFIG_RTC_DRV_DS1305 is not set
1015# CONFIG_RTC_DRV_DS1390 is not set
1016# CONFIG_RTC_DRV_MAX6902 is not set
1017# CONFIG_RTC_DRV_R9701 is not set
1018# CONFIG_RTC_DRV_RS5C348 is not set
1019# CONFIG_RTC_DRV_DS3234 is not set
1020# CONFIG_RTC_DRV_PCF2123 is not set
1021
1022#
1023# Platform RTC drivers
1024#
1025# CONFIG_RTC_DRV_DS1286 is not set
1026# CONFIG_RTC_DRV_DS1511 is not set
1027# CONFIG_RTC_DRV_DS1553 is not set
1028# CONFIG_RTC_DRV_DS1742 is not set
1029# CONFIG_RTC_DRV_STK17TA8 is not set
1030# CONFIG_RTC_DRV_M48T86 is not set
1031# CONFIG_RTC_DRV_M48T35 is not set
1032# CONFIG_RTC_DRV_M48T59 is not set
1033# CONFIG_RTC_DRV_BQ4802 is not set
1034# CONFIG_RTC_DRV_V3020 is not set
1035
1036#
1037# on-CPU RTC drivers
1038#
1039CONFIG_RTC_DRV_BFIN=y
1040# CONFIG_DMADEVICES is not set
1041# CONFIG_AUXDISPLAY is not set
1042# CONFIG_UIO is not set
1043
1044#
1045# TI VLYNQ
1046#
1047# CONFIG_STAGING is not set
1048
1049#
1050# Firmware Drivers
1051#
1052# CONFIG_FIRMWARE_MEMMAP is not set
1053# CONFIG_SIGMA is not set
1054
1055#
1056# File systems
1057#
1058CONFIG_EXT2_FS=y
1059# CONFIG_EXT2_FS_XATTR is not set
1060# CONFIG_EXT3_FS is not set
1061# CONFIG_EXT4_FS is not set
1062# CONFIG_REISERFS_FS is not set
1063# CONFIG_JFS_FS is not set
1064# CONFIG_FS_POSIX_ACL is not set
1065# CONFIG_XFS_FS is not set
1066# CONFIG_OCFS2_FS is not set
1067# CONFIG_BTRFS_FS is not set
1068# CONFIG_NILFS2_FS is not set
1069CONFIG_FILE_LOCKING=y
1070CONFIG_FSNOTIFY=y
1071# CONFIG_DNOTIFY is not set
1072CONFIG_INOTIFY=y
1073CONFIG_INOTIFY_USER=y
1074# CONFIG_QUOTA is not set
1075# CONFIG_AUTOFS_FS is not set
1076# CONFIG_AUTOFS4_FS is not set
1077# CONFIG_FUSE_FS is not set
1078
1079#
1080# Caches
1081#
1082# CONFIG_FSCACHE is not set
1083
1084#
1085# CD-ROM/DVD Filesystems
1086#
1087# CONFIG_ISO9660_FS is not set
1088# CONFIG_UDF_FS is not set
1089
1090#
1091# DOS/FAT/NT Filesystems
1092#
1093CONFIG_FAT_FS=m
1094# CONFIG_MSDOS_FS is not set
1095CONFIG_VFAT_FS=m
1096CONFIG_FAT_DEFAULT_CODEPAGE=437
1097CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1098# CONFIG_NTFS_FS is not set
1099
1100#
1101# Pseudo filesystems
1102#
1103CONFIG_PROC_FS=y
1104CONFIG_PROC_SYSCTL=y
1105CONFIG_SYSFS=y
1106# CONFIG_HUGETLB_PAGE is not set
1107# CONFIG_CONFIGFS_FS is not set
1108# CONFIG_MISC_FILESYSTEMS is not set
1109CONFIG_NETWORK_FILESYSTEMS=y
1110CONFIG_NFS_FS=y
1111CONFIG_NFS_V3=y
1112# CONFIG_NFS_V3_ACL is not set
1113# CONFIG_NFS_V4 is not set
1114CONFIG_ROOT_NFS=y
1115# CONFIG_NFSD is not set
1116CONFIG_LOCKD=y
1117CONFIG_LOCKD_V4=y
1118CONFIG_NFS_COMMON=y
1119CONFIG_SUNRPC=y
1120# CONFIG_RPCSEC_GSS_KRB5 is not set
1121# CONFIG_RPCSEC_GSS_SPKM3 is not set
1122# CONFIG_SMB_FS is not set
1123# CONFIG_CIFS is not set
1124# CONFIG_NCP_FS is not set
1125# CONFIG_CODA_FS is not set
1126# CONFIG_AFS_FS is not set
1127
1128#
1129# Partition Types
1130#
1131# CONFIG_PARTITION_ADVANCED is not set
1132CONFIG_MSDOS_PARTITION=y
1133CONFIG_NLS=m
1134CONFIG_NLS_DEFAULT="iso8859-1"
1135CONFIG_NLS_CODEPAGE_437=m
1136# CONFIG_NLS_CODEPAGE_737 is not set
1137# CONFIG_NLS_CODEPAGE_775 is not set
1138# CONFIG_NLS_CODEPAGE_850 is not set
1139# CONFIG_NLS_CODEPAGE_852 is not set
1140# CONFIG_NLS_CODEPAGE_855 is not set
1141# CONFIG_NLS_CODEPAGE_857 is not set
1142# CONFIG_NLS_CODEPAGE_860 is not set
1143# CONFIG_NLS_CODEPAGE_861 is not set
1144# CONFIG_NLS_CODEPAGE_862 is not set
1145# CONFIG_NLS_CODEPAGE_863 is not set
1146# CONFIG_NLS_CODEPAGE_864 is not set
1147# CONFIG_NLS_CODEPAGE_865 is not set
1148# CONFIG_NLS_CODEPAGE_866 is not set
1149# CONFIG_NLS_CODEPAGE_869 is not set
1150# CONFIG_NLS_CODEPAGE_936 is not set
1151# CONFIG_NLS_CODEPAGE_950 is not set
1152# CONFIG_NLS_CODEPAGE_932 is not set
1153# CONFIG_NLS_CODEPAGE_949 is not set
1154# CONFIG_NLS_CODEPAGE_874 is not set
1155# CONFIG_NLS_ISO8859_8 is not set
1156# CONFIG_NLS_CODEPAGE_1250 is not set
1157# CONFIG_NLS_CODEPAGE_1251 is not set
1158# CONFIG_NLS_ASCII is not set
1159CONFIG_NLS_ISO8859_1=m
1160# CONFIG_NLS_ISO8859_2 is not set
1161# CONFIG_NLS_ISO8859_3 is not set
1162# CONFIG_NLS_ISO8859_4 is not set
1163# CONFIG_NLS_ISO8859_5 is not set
1164# CONFIG_NLS_ISO8859_6 is not set
1165# CONFIG_NLS_ISO8859_7 is not set
1166# CONFIG_NLS_ISO8859_9 is not set
1167# CONFIG_NLS_ISO8859_13 is not set
1168# CONFIG_NLS_ISO8859_14 is not set
1169# CONFIG_NLS_ISO8859_15 is not set
1170# CONFIG_NLS_KOI8_R is not set
1171# CONFIG_NLS_KOI8_U is not set
1172CONFIG_NLS_UTF8=m
1173# CONFIG_DLM is not set
1174
1175#
1176# Kernel hacking
1177#
1178# CONFIG_PRINTK_TIME is not set
1179CONFIG_ENABLE_WARN_DEPRECATED=y
1180CONFIG_ENABLE_MUST_CHECK=y
1181CONFIG_FRAME_WARN=1024
1182# CONFIG_MAGIC_SYSRQ is not set
1183# CONFIG_STRIP_ASM_SYMS is not set
1184# CONFIG_UNUSED_SYMBOLS is not set
1185CONFIG_DEBUG_FS=y
1186# CONFIG_HEADERS_CHECK is not set
1187CONFIG_DEBUG_SECTION_MISMATCH=y
1188CONFIG_DEBUG_KERNEL=y
1189CONFIG_DEBUG_SHIRQ=y
1190CONFIG_DETECT_SOFTLOCKUP=y
1191# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1192CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1193CONFIG_DETECT_HUNG_TASK=y
1194# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1195CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1196CONFIG_SCHED_DEBUG=y
1197# CONFIG_SCHEDSTATS is not set
1198# CONFIG_TIMER_STATS is not set
1199# CONFIG_DEBUG_OBJECTS is not set
1200# CONFIG_DEBUG_SLAB is not set
1201# CONFIG_DEBUG_SPINLOCK is not set
1202# CONFIG_DEBUG_MUTEXES is not set
1203# CONFIG_DEBUG_LOCK_ALLOC is not set
1204# CONFIG_PROVE_LOCKING is not set
1205# CONFIG_LOCK_STAT is not set
1206# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1207# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1208# CONFIG_DEBUG_KOBJECT is not set
1209CONFIG_DEBUG_BUGVERBOSE=y
1210CONFIG_DEBUG_INFO=y
1211# CONFIG_DEBUG_VM is not set
1212# CONFIG_DEBUG_NOMMU_REGIONS is not set
1213# CONFIG_DEBUG_WRITECOUNT is not set
1214# CONFIG_DEBUG_MEMORY_INIT is not set
1215# CONFIG_DEBUG_LIST is not set
1216# CONFIG_DEBUG_SG is not set
1217# CONFIG_DEBUG_NOTIFIERS is not set
1218# CONFIG_DEBUG_CREDENTIALS is not set
1219# CONFIG_FRAME_POINTER is not set
1220# CONFIG_BOOT_PRINTK_DELAY is not set
1221# CONFIG_RCU_TORTURE_TEST is not set
1222# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1223# CONFIG_BACKTRACE_SELF_TEST is not set
1224# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1225# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1226# CONFIG_FAULT_INJECTION is not set
1227# CONFIG_PAGE_POISONING is not set
1228CONFIG_HAVE_FUNCTION_TRACER=y
1229CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1230CONFIG_TRACING_SUPPORT=y
1231# CONFIG_FTRACE is not set
1232# CONFIG_DYNAMIC_DEBUG is not set
1233# CONFIG_SAMPLES is not set
1234CONFIG_HAVE_ARCH_KGDB=y
1235# CONFIG_KGDB is not set
1236# CONFIG_DEBUG_STACKOVERFLOW is not set
1237# CONFIG_DEBUG_STACK_USAGE is not set
1238CONFIG_DEBUG_VERBOSE=y
1239CONFIG_DEBUG_MMRS=y
1240CONFIG_DEBUG_HWERR=y
1241CONFIG_EXACT_HWERR=y
1242CONFIG_DEBUG_DOUBLEFAULT=y
1243CONFIG_DEBUG_DOUBLEFAULT_PRINT=y
1244# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set
1245# CONFIG_DEBUG_ICACHE_CHECK is not set
1246CONFIG_DEBUG_HUNT_FOR_ZERO=y
1247CONFIG_DEBUG_BFIN_HWTRACE_ON=y
1248# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set
1249CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
1250# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
1251CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1
1252# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
1253CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
1254CONFIG_EARLY_PRINTK=y
1255CONFIG_CPLB_INFO=y
1256CONFIG_ACCESS_CHECK=y
1257# CONFIG_BFIN_ISRAM_SELF_TEST is not set
1258
1259#
1260# Security options
1261#
1262# CONFIG_KEYS is not set
1263CONFIG_SECURITY=y
1264# CONFIG_SECURITYFS is not set
1265# CONFIG_SECURITY_NETWORK is not set
1266# CONFIG_SECURITY_PATH is not set
1267# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1268# CONFIG_SECURITY_TOMOYO is not set
1269CONFIG_CRYPTO=y
1270
1271#
1272# Crypto core or helper
1273#
1274# CONFIG_CRYPTO_MANAGER is not set
1275# CONFIG_CRYPTO_MANAGER2 is not set
1276# CONFIG_CRYPTO_GF128MUL is not set
1277# CONFIG_CRYPTO_NULL is not set
1278# CONFIG_CRYPTO_CRYPTD is not set
1279# CONFIG_CRYPTO_AUTHENC is not set
1280# CONFIG_CRYPTO_TEST is not set
1281
1282#
1283# Authenticated Encryption with Associated Data
1284#
1285# CONFIG_CRYPTO_CCM is not set
1286# CONFIG_CRYPTO_GCM is not set
1287# CONFIG_CRYPTO_SEQIV is not set
1288
1289#
1290# Block modes
1291#
1292# CONFIG_CRYPTO_CBC is not set
1293# CONFIG_CRYPTO_CTR is not set
1294# CONFIG_CRYPTO_CTS is not set
1295# CONFIG_CRYPTO_ECB is not set
1296# CONFIG_CRYPTO_LRW is not set
1297# CONFIG_CRYPTO_PCBC is not set
1298# CONFIG_CRYPTO_XTS is not set
1299
1300#
1301# Hash modes
1302#
1303# CONFIG_CRYPTO_HMAC is not set
1304# CONFIG_CRYPTO_XCBC is not set
1305# CONFIG_CRYPTO_VMAC is not set
1306
1307#
1308# Digest
1309#
1310# CONFIG_CRYPTO_CRC32C is not set
1311# CONFIG_CRYPTO_GHASH is not set
1312# CONFIG_CRYPTO_MD4 is not set
1313# CONFIG_CRYPTO_MD5 is not set
1314# CONFIG_CRYPTO_MICHAEL_MIC is not set
1315# CONFIG_CRYPTO_RMD128 is not set
1316# CONFIG_CRYPTO_RMD160 is not set
1317# CONFIG_CRYPTO_RMD256 is not set
1318# CONFIG_CRYPTO_RMD320 is not set
1319# CONFIG_CRYPTO_SHA1 is not set
1320# CONFIG_CRYPTO_SHA256 is not set
1321# CONFIG_CRYPTO_SHA512 is not set
1322# CONFIG_CRYPTO_TGR192 is not set
1323# CONFIG_CRYPTO_WP512 is not set
1324
1325#
1326# Ciphers
1327#
1328# CONFIG_CRYPTO_AES is not set
1329# CONFIG_CRYPTO_ANUBIS is not set
1330# CONFIG_CRYPTO_ARC4 is not set
1331# CONFIG_CRYPTO_BLOWFISH is not set
1332# CONFIG_CRYPTO_CAMELLIA is not set
1333# CONFIG_CRYPTO_CAST5 is not set
1334# CONFIG_CRYPTO_CAST6 is not set
1335# CONFIG_CRYPTO_DES is not set
1336# CONFIG_CRYPTO_FCRYPT is not set
1337# CONFIG_CRYPTO_KHAZAD is not set
1338# CONFIG_CRYPTO_SALSA20 is not set
1339# CONFIG_CRYPTO_SEED is not set
1340# CONFIG_CRYPTO_SERPENT is not set
1341# CONFIG_CRYPTO_TEA is not set
1342# CONFIG_CRYPTO_TWOFISH is not set
1343
1344#
1345# Compression
1346#
1347# CONFIG_CRYPTO_DEFLATE is not set
1348# CONFIG_CRYPTO_ZLIB is not set
1349# CONFIG_CRYPTO_LZO is not set
1350
1351#
1352# Random Number Generation
1353#
1354# CONFIG_CRYPTO_ANSI_CPRNG is not set
1355CONFIG_CRYPTO_HW=y
1356# CONFIG_BINARY_PRINTF is not set
1357
1358#
1359# Library routines
1360#
1361CONFIG_BITREVERSE=y
1362CONFIG_GENERIC_FIND_LAST_BIT=y
1363CONFIG_CRC_CCITT=m
1364# CONFIG_CRC16 is not set
1365# CONFIG_CRC_T10DIF is not set
1366CONFIG_CRC_ITU_T=y
1367CONFIG_CRC32=y
1368CONFIG_CRC7=y
1369# CONFIG_LIBCRC32C is not set
1370CONFIG_ZLIB_INFLATE=y
1371CONFIG_DECOMPRESS_LZMA=y
1372CONFIG_HAS_IOMEM=y
1373CONFIG_HAS_IOPORT=y
1374CONFIG_HAS_DMA=y
1375CONFIG_NLATTR=y
diff --git a/arch/blackfin/include/asm/bfin-lq035q1.h b/arch/blackfin/include/asm/bfin-lq035q1.h
index 57bc21ac2296..836895156b5b 100644
--- a/arch/blackfin/include/asm/bfin-lq035q1.h
+++ b/arch/blackfin/include/asm/bfin-lq035q1.h
@@ -8,6 +8,9 @@
8#ifndef BFIN_LQ035Q1_H 8#ifndef BFIN_LQ035Q1_H
9#define BFIN_LQ035Q1_H 9#define BFIN_LQ035Q1_H
10 10
11/*
12 * LCD Modes
13 */
11#define LQ035_RL (0 << 8) /* Right -> Left Scan */ 14#define LQ035_RL (0 << 8) /* Right -> Left Scan */
12#define LQ035_LR (1 << 8) /* Left -> Right Scan */ 15#define LQ035_LR (1 << 8) /* Left -> Right Scan */
13#define LQ035_TB (1 << 9) /* Top -> Botton Scan */ 16#define LQ035_TB (1 << 9) /* Top -> Botton Scan */
@@ -17,9 +20,18 @@
17#define LQ035_NORM (1 << 13) /* Reversal */ 20#define LQ035_NORM (1 << 13) /* Reversal */
18#define LQ035_REV (0 << 13) /* Reversal */ 21#define LQ035_REV (0 << 13) /* Reversal */
19 22
23/*
24 * PPI Modes
25 */
26
27#define USE_RGB565_16_BIT_PPI 1
28#define USE_RGB565_8_BIT_PPI 2
29#define USE_RGB888_8_BIT_PPI 3
30
20struct bfin_lq035q1fb_disp_info { 31struct bfin_lq035q1fb_disp_info {
21 32
22 unsigned mode; 33 unsigned mode;
34 unsigned ppi_mode;
23 /* GPIOs */ 35 /* GPIOs */
24 int use_bl; 36 int use_bl;
25 unsigned gpio_bl; 37 unsigned gpio_bl;
diff --git a/arch/blackfin/include/asm/bfin_can.h b/arch/blackfin/include/asm/bfin_can.h
new file mode 100644
index 000000000000..eec0076a385b
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_can.h
@@ -0,0 +1,725 @@
1/*
2 * bfin_can.h - interface to Blackfin CANs
3 *
4 * Copyright 2004-2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __ASM_BFIN_CAN_H__
10#define __ASM_BFIN_CAN_H__
11
12/*
13 * transmit and receive channels
14 */
15#define TRANSMIT_CHL 24
16#define RECEIVE_STD_CHL 0
17#define RECEIVE_EXT_CHL 4
18#define RECEIVE_RTR_CHL 8
19#define RECEIVE_EXT_RTR_CHL 12
20#define MAX_CHL_NUMBER 32
21
22/*
23 * All Blackfin system MMRs are padded to 32bits even if the register
24 * itself is only 16bits. So use a helper macro to streamline this.
25 */
26#define __BFP(m) u16 m; u16 __pad_##m
27
28/*
29 * bfin can registers layout
30 */
31struct bfin_can_mask_regs {
32 __BFP(aml);
33 __BFP(amh);
34};
35
36struct bfin_can_channel_regs {
37 u16 data[8];
38 __BFP(dlc);
39 __BFP(tsv);
40 __BFP(id0);
41 __BFP(id1);
42};
43
44struct bfin_can_regs {
45 /*
46 * global control and status registers
47 */
48 __BFP(mc1); /* offset 0x00 */
49 __BFP(md1); /* offset 0x04 */
50 __BFP(trs1); /* offset 0x08 */
51 __BFP(trr1); /* offset 0x0c */
52 __BFP(ta1); /* offset 0x10 */
53 __BFP(aa1); /* offset 0x14 */
54 __BFP(rmp1); /* offset 0x18 */
55 __BFP(rml1); /* offset 0x1c */
56 __BFP(mbtif1); /* offset 0x20 */
57 __BFP(mbrif1); /* offset 0x24 */
58 __BFP(mbim1); /* offset 0x28 */
59 __BFP(rfh1); /* offset 0x2c */
60 __BFP(opss1); /* offset 0x30 */
61 u32 __pad1[3];
62 __BFP(mc2); /* offset 0x40 */
63 __BFP(md2); /* offset 0x44 */
64 __BFP(trs2); /* offset 0x48 */
65 __BFP(trr2); /* offset 0x4c */
66 __BFP(ta2); /* offset 0x50 */
67 __BFP(aa2); /* offset 0x54 */
68 __BFP(rmp2); /* offset 0x58 */
69 __BFP(rml2); /* offset 0x5c */
70 __BFP(mbtif2); /* offset 0x60 */
71 __BFP(mbrif2); /* offset 0x64 */
72 __BFP(mbim2); /* offset 0x68 */
73 __BFP(rfh2); /* offset 0x6c */
74 __BFP(opss2); /* offset 0x70 */
75 u32 __pad2[3];
76 __BFP(clock); /* offset 0x80 */
77 __BFP(timing); /* offset 0x84 */
78 __BFP(debug); /* offset 0x88 */
79 __BFP(status); /* offset 0x8c */
80 __BFP(cec); /* offset 0x90 */
81 __BFP(gis); /* offset 0x94 */
82 __BFP(gim); /* offset 0x98 */
83 __BFP(gif); /* offset 0x9c */
84 __BFP(control); /* offset 0xa0 */
85 __BFP(intr); /* offset 0xa4 */
86 u32 __pad3[1];
87 __BFP(mbtd); /* offset 0xac */
88 __BFP(ewr); /* offset 0xb0 */
89 __BFP(esr); /* offset 0xb4 */
90 u32 __pad4[2];
91 __BFP(ucreg); /* offset 0xc0 */
92 __BFP(uccnt); /* offset 0xc4 */
93 __BFP(ucrc); /* offset 0xc8 */
94 __BFP(uccnf); /* offset 0xcc */
95 u32 __pad5[12];
96
97 /*
98 * channel(mailbox) mask and message registers
99 */
100 struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
101 struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
102};
103
104#undef __BFP
105
106/* CAN_CONTROL Masks */
107#define SRS 0x0001 /* Software Reset */
108#define DNM 0x0002 /* Device Net Mode */
109#define ABO 0x0004 /* Auto-Bus On Enable */
110#define TXPRIO 0x0008 /* TX Priority (Priority/Mailbox*) */
111#define WBA 0x0010 /* Wake-Up On CAN Bus Activity Enable */
112#define SMR 0x0020 /* Sleep Mode Request */
113#define CSR 0x0040 /* CAN Suspend Mode Request */
114#define CCR 0x0080 /* CAN Configuration Mode Request */
115
116/* CAN_STATUS Masks */
117#define WT 0x0001 /* TX Warning Flag */
118#define WR 0x0002 /* RX Warning Flag */
119#define EP 0x0004 /* Error Passive Mode */
120#define EBO 0x0008 /* Error Bus Off Mode */
121#define SMA 0x0020 /* Sleep Mode Acknowledge */
122#define CSA 0x0040 /* Suspend Mode Acknowledge */
123#define CCA 0x0080 /* Configuration Mode Acknowledge */
124#define MBPTR 0x1F00 /* Mailbox Pointer */
125#define TRM 0x4000 /* Transmit Mode */
126#define REC 0x8000 /* Receive Mode */
127
128/* CAN_CLOCK Masks */
129#define BRP 0x03FF /* Bit-Rate Pre-Scaler */
130
131/* CAN_TIMING Masks */
132#define TSEG1 0x000F /* Time Segment 1 */
133#define TSEG2 0x0070 /* Time Segment 2 */
134#define SAM 0x0080 /* Sampling */
135#define SJW 0x0300 /* Synchronization Jump Width */
136
137/* CAN_DEBUG Masks */
138#define DEC 0x0001 /* Disable CAN Error Counters */
139#define DRI 0x0002 /* Disable CAN RX Input */
140#define DTO 0x0004 /* Disable CAN TX Output */
141#define DIL 0x0008 /* Disable CAN Internal Loop */
142#define MAA 0x0010 /* Mode Auto-Acknowledge Enable */
143#define MRB 0x0020 /* Mode Read Back Enable */
144#define CDE 0x8000 /* CAN Debug Enable */
145
146/* CAN_CEC Masks */
147#define RXECNT 0x00FF /* Receive Error Counter */
148#define TXECNT 0xFF00 /* Transmit Error Counter */
149
150/* CAN_INTR Masks */
151#define MBRIRQ 0x0001 /* Mailbox Receive Interrupt */
152#define MBTIRQ 0x0002 /* Mailbox Transmit Interrupt */
153#define GIRQ 0x0004 /* Global Interrupt */
154#define SMACK 0x0008 /* Sleep Mode Acknowledge */
155#define CANTX 0x0040 /* CAN TX Bus Value */
156#define CANRX 0x0080 /* CAN RX Bus Value */
157
158/* CAN_MBxx_ID1 and CAN_MBxx_ID0 Masks */
159#define DFC 0xFFFF /* Data Filtering Code (If Enabled) (ID0) */
160#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (ID0) */
161#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (ID1) */
162#define BASEID 0x1FFC /* Base Identifier */
163#define IDE 0x2000 /* Identifier Extension */
164#define RTR 0x4000 /* Remote Frame Transmission Request */
165#define AME 0x8000 /* Acceptance Mask Enable */
166
167/* CAN_MBxx_TIMESTAMP Masks */
168#define TSV 0xFFFF /* Timestamp */
169
170/* CAN_MBxx_LENGTH Masks */
171#define DLC 0x000F /* Data Length Code */
172
173/* CAN_AMxxH and CAN_AMxxL Masks */
174#define DFM 0xFFFF /* Data Field Mask (If Enabled) (CAN_AMxxL) */
175#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (CAN_AMxxL) */
176#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (CAN_AMxxH) */
177#define BASEID 0x1FFC /* Base Identifier */
178#define AMIDE 0x2000 /* Acceptance Mask ID Extension Enable */
179#define FMD 0x4000 /* Full Mask Data Field Enable */
180#define FDF 0x8000 /* Filter On Data Field Enable */
181
182/* CAN_MC1 Masks */
183#define MC0 0x0001 /* Enable Mailbox 0 */
184#define MC1 0x0002 /* Enable Mailbox 1 */
185#define MC2 0x0004 /* Enable Mailbox 2 */
186#define MC3 0x0008 /* Enable Mailbox 3 */
187#define MC4 0x0010 /* Enable Mailbox 4 */
188#define MC5 0x0020 /* Enable Mailbox 5 */
189#define MC6 0x0040 /* Enable Mailbox 6 */
190#define MC7 0x0080 /* Enable Mailbox 7 */
191#define MC8 0x0100 /* Enable Mailbox 8 */
192#define MC9 0x0200 /* Enable Mailbox 9 */
193#define MC10 0x0400 /* Enable Mailbox 10 */
194#define MC11 0x0800 /* Enable Mailbox 11 */
195#define MC12 0x1000 /* Enable Mailbox 12 */
196#define MC13 0x2000 /* Enable Mailbox 13 */
197#define MC14 0x4000 /* Enable Mailbox 14 */
198#define MC15 0x8000 /* Enable Mailbox 15 */
199
200/* CAN_MC2 Masks */
201#define MC16 0x0001 /* Enable Mailbox 16 */
202#define MC17 0x0002 /* Enable Mailbox 17 */
203#define MC18 0x0004 /* Enable Mailbox 18 */
204#define MC19 0x0008 /* Enable Mailbox 19 */
205#define MC20 0x0010 /* Enable Mailbox 20 */
206#define MC21 0x0020 /* Enable Mailbox 21 */
207#define MC22 0x0040 /* Enable Mailbox 22 */
208#define MC23 0x0080 /* Enable Mailbox 23 */
209#define MC24 0x0100 /* Enable Mailbox 24 */
210#define MC25 0x0200 /* Enable Mailbox 25 */
211#define MC26 0x0400 /* Enable Mailbox 26 */
212#define MC27 0x0800 /* Enable Mailbox 27 */
213#define MC28 0x1000 /* Enable Mailbox 28 */
214#define MC29 0x2000 /* Enable Mailbox 29 */
215#define MC30 0x4000 /* Enable Mailbox 30 */
216#define MC31 0x8000 /* Enable Mailbox 31 */
217
218/* CAN_MD1 Masks */
219#define MD0 0x0001 /* Enable Mailbox 0 For Receive */
220#define MD1 0x0002 /* Enable Mailbox 1 For Receive */
221#define MD2 0x0004 /* Enable Mailbox 2 For Receive */
222#define MD3 0x0008 /* Enable Mailbox 3 For Receive */
223#define MD4 0x0010 /* Enable Mailbox 4 For Receive */
224#define MD5 0x0020 /* Enable Mailbox 5 For Receive */
225#define MD6 0x0040 /* Enable Mailbox 6 For Receive */
226#define MD7 0x0080 /* Enable Mailbox 7 For Receive */
227#define MD8 0x0100 /* Enable Mailbox 8 For Receive */
228#define MD9 0x0200 /* Enable Mailbox 9 For Receive */
229#define MD10 0x0400 /* Enable Mailbox 10 For Receive */
230#define MD11 0x0800 /* Enable Mailbox 11 For Receive */
231#define MD12 0x1000 /* Enable Mailbox 12 For Receive */
232#define MD13 0x2000 /* Enable Mailbox 13 For Receive */
233#define MD14 0x4000 /* Enable Mailbox 14 For Receive */
234#define MD15 0x8000 /* Enable Mailbox 15 For Receive */
235
236/* CAN_MD2 Masks */
237#define MD16 0x0001 /* Enable Mailbox 16 For Receive */
238#define MD17 0x0002 /* Enable Mailbox 17 For Receive */
239#define MD18 0x0004 /* Enable Mailbox 18 For Receive */
240#define MD19 0x0008 /* Enable Mailbox 19 For Receive */
241#define MD20 0x0010 /* Enable Mailbox 20 For Receive */
242#define MD21 0x0020 /* Enable Mailbox 21 For Receive */
243#define MD22 0x0040 /* Enable Mailbox 22 For Receive */
244#define MD23 0x0080 /* Enable Mailbox 23 For Receive */
245#define MD24 0x0100 /* Enable Mailbox 24 For Receive */
246#define MD25 0x0200 /* Enable Mailbox 25 For Receive */
247#define MD26 0x0400 /* Enable Mailbox 26 For Receive */
248#define MD27 0x0800 /* Enable Mailbox 27 For Receive */
249#define MD28 0x1000 /* Enable Mailbox 28 For Receive */
250#define MD29 0x2000 /* Enable Mailbox 29 For Receive */
251#define MD30 0x4000 /* Enable Mailbox 30 For Receive */
252#define MD31 0x8000 /* Enable Mailbox 31 For Receive */
253
254/* CAN_RMP1 Masks */
255#define RMP0 0x0001 /* RX Message Pending In Mailbox 0 */
256#define RMP1 0x0002 /* RX Message Pending In Mailbox 1 */
257#define RMP2 0x0004 /* RX Message Pending In Mailbox 2 */
258#define RMP3 0x0008 /* RX Message Pending In Mailbox 3 */
259#define RMP4 0x0010 /* RX Message Pending In Mailbox 4 */
260#define RMP5 0x0020 /* RX Message Pending In Mailbox 5 */
261#define RMP6 0x0040 /* RX Message Pending In Mailbox 6 */
262#define RMP7 0x0080 /* RX Message Pending In Mailbox 7 */
263#define RMP8 0x0100 /* RX Message Pending In Mailbox 8 */
264#define RMP9 0x0200 /* RX Message Pending In Mailbox 9 */
265#define RMP10 0x0400 /* RX Message Pending In Mailbox 10 */
266#define RMP11 0x0800 /* RX Message Pending In Mailbox 11 */
267#define RMP12 0x1000 /* RX Message Pending In Mailbox 12 */
268#define RMP13 0x2000 /* RX Message Pending In Mailbox 13 */
269#define RMP14 0x4000 /* RX Message Pending In Mailbox 14 */
270#define RMP15 0x8000 /* RX Message Pending In Mailbox 15 */
271
272/* CAN_RMP2 Masks */
273#define RMP16 0x0001 /* RX Message Pending In Mailbox 16 */
274#define RMP17 0x0002 /* RX Message Pending In Mailbox 17 */
275#define RMP18 0x0004 /* RX Message Pending In Mailbox 18 */
276#define RMP19 0x0008 /* RX Message Pending In Mailbox 19 */
277#define RMP20 0x0010 /* RX Message Pending In Mailbox 20 */
278#define RMP21 0x0020 /* RX Message Pending In Mailbox 21 */
279#define RMP22 0x0040 /* RX Message Pending In Mailbox 22 */
280#define RMP23 0x0080 /* RX Message Pending In Mailbox 23 */
281#define RMP24 0x0100 /* RX Message Pending In Mailbox 24 */
282#define RMP25 0x0200 /* RX Message Pending In Mailbox 25 */
283#define RMP26 0x0400 /* RX Message Pending In Mailbox 26 */
284#define RMP27 0x0800 /* RX Message Pending In Mailbox 27 */
285#define RMP28 0x1000 /* RX Message Pending In Mailbox 28 */
286#define RMP29 0x2000 /* RX Message Pending In Mailbox 29 */
287#define RMP30 0x4000 /* RX Message Pending In Mailbox 30 */
288#define RMP31 0x8000 /* RX Message Pending In Mailbox 31 */
289
290/* CAN_RML1 Masks */
291#define RML0 0x0001 /* RX Message Lost In Mailbox 0 */
292#define RML1 0x0002 /* RX Message Lost In Mailbox 1 */
293#define RML2 0x0004 /* RX Message Lost In Mailbox 2 */
294#define RML3 0x0008 /* RX Message Lost In Mailbox 3 */
295#define RML4 0x0010 /* RX Message Lost In Mailbox 4 */
296#define RML5 0x0020 /* RX Message Lost In Mailbox 5 */
297#define RML6 0x0040 /* RX Message Lost In Mailbox 6 */
298#define RML7 0x0080 /* RX Message Lost In Mailbox 7 */
299#define RML8 0x0100 /* RX Message Lost In Mailbox 8 */
300#define RML9 0x0200 /* RX Message Lost In Mailbox 9 */
301#define RML10 0x0400 /* RX Message Lost In Mailbox 10 */
302#define RML11 0x0800 /* RX Message Lost In Mailbox 11 */
303#define RML12 0x1000 /* RX Message Lost In Mailbox 12 */
304#define RML13 0x2000 /* RX Message Lost In Mailbox 13 */
305#define RML14 0x4000 /* RX Message Lost In Mailbox 14 */
306#define RML15 0x8000 /* RX Message Lost In Mailbox 15 */
307
308/* CAN_RML2 Masks */
309#define RML16 0x0001 /* RX Message Lost In Mailbox 16 */
310#define RML17 0x0002 /* RX Message Lost In Mailbox 17 */
311#define RML18 0x0004 /* RX Message Lost In Mailbox 18 */
312#define RML19 0x0008 /* RX Message Lost In Mailbox 19 */
313#define RML20 0x0010 /* RX Message Lost In Mailbox 20 */
314#define RML21 0x0020 /* RX Message Lost In Mailbox 21 */
315#define RML22 0x0040 /* RX Message Lost In Mailbox 22 */
316#define RML23 0x0080 /* RX Message Lost In Mailbox 23 */
317#define RML24 0x0100 /* RX Message Lost In Mailbox 24 */
318#define RML25 0x0200 /* RX Message Lost In Mailbox 25 */
319#define RML26 0x0400 /* RX Message Lost In Mailbox 26 */
320#define RML27 0x0800 /* RX Message Lost In Mailbox 27 */
321#define RML28 0x1000 /* RX Message Lost In Mailbox 28 */
322#define RML29 0x2000 /* RX Message Lost In Mailbox 29 */
323#define RML30 0x4000 /* RX Message Lost In Mailbox 30 */
324#define RML31 0x8000 /* RX Message Lost In Mailbox 31 */
325
326/* CAN_OPSS1 Masks */
327#define OPSS0 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 0 */
328#define OPSS1 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 1 */
329#define OPSS2 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 2 */
330#define OPSS3 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 3 */
331#define OPSS4 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 4 */
332#define OPSS5 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 5 */
333#define OPSS6 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 6 */
334#define OPSS7 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 7 */
335#define OPSS8 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 8 */
336#define OPSS9 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 9 */
337#define OPSS10 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 10 */
338#define OPSS11 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 11 */
339#define OPSS12 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 12 */
340#define OPSS13 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 13 */
341#define OPSS14 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 14 */
342#define OPSS15 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 15 */
343
344/* CAN_OPSS2 Masks */
345#define OPSS16 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 16 */
346#define OPSS17 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 17 */
347#define OPSS18 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 18 */
348#define OPSS19 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 19 */
349#define OPSS20 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 20 */
350#define OPSS21 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 21 */
351#define OPSS22 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 22 */
352#define OPSS23 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 23 */
353#define OPSS24 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 24 */
354#define OPSS25 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 25 */
355#define OPSS26 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 26 */
356#define OPSS27 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 27 */
357#define OPSS28 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 28 */
358#define OPSS29 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 29 */
359#define OPSS30 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 30 */
360#define OPSS31 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 31 */
361
362/* CAN_TRR1 Masks */
363#define TRR0 0x0001 /* Deny But Don't Lock Access To Mailbox 0 */
364#define TRR1 0x0002 /* Deny But Don't Lock Access To Mailbox 1 */
365#define TRR2 0x0004 /* Deny But Don't Lock Access To Mailbox 2 */
366#define TRR3 0x0008 /* Deny But Don't Lock Access To Mailbox 3 */
367#define TRR4 0x0010 /* Deny But Don't Lock Access To Mailbox 4 */
368#define TRR5 0x0020 /* Deny But Don't Lock Access To Mailbox 5 */
369#define TRR6 0x0040 /* Deny But Don't Lock Access To Mailbox 6 */
370#define TRR7 0x0080 /* Deny But Don't Lock Access To Mailbox 7 */
371#define TRR8 0x0100 /* Deny But Don't Lock Access To Mailbox 8 */
372#define TRR9 0x0200 /* Deny But Don't Lock Access To Mailbox 9 */
373#define TRR10 0x0400 /* Deny But Don't Lock Access To Mailbox 10 */
374#define TRR11 0x0800 /* Deny But Don't Lock Access To Mailbox 11 */
375#define TRR12 0x1000 /* Deny But Don't Lock Access To Mailbox 12 */
376#define TRR13 0x2000 /* Deny But Don't Lock Access To Mailbox 13 */
377#define TRR14 0x4000 /* Deny But Don't Lock Access To Mailbox 14 */
378#define TRR15 0x8000 /* Deny But Don't Lock Access To Mailbox 15 */
379
380/* CAN_TRR2 Masks */
381#define TRR16 0x0001 /* Deny But Don't Lock Access To Mailbox 16 */
382#define TRR17 0x0002 /* Deny But Don't Lock Access To Mailbox 17 */
383#define TRR18 0x0004 /* Deny But Don't Lock Access To Mailbox 18 */
384#define TRR19 0x0008 /* Deny But Don't Lock Access To Mailbox 19 */
385#define TRR20 0x0010 /* Deny But Don't Lock Access To Mailbox 20 */
386#define TRR21 0x0020 /* Deny But Don't Lock Access To Mailbox 21 */
387#define TRR22 0x0040 /* Deny But Don't Lock Access To Mailbox 22 */
388#define TRR23 0x0080 /* Deny But Don't Lock Access To Mailbox 23 */
389#define TRR24 0x0100 /* Deny But Don't Lock Access To Mailbox 24 */
390#define TRR25 0x0200 /* Deny But Don't Lock Access To Mailbox 25 */
391#define TRR26 0x0400 /* Deny But Don't Lock Access To Mailbox 26 */
392#define TRR27 0x0800 /* Deny But Don't Lock Access To Mailbox 27 */
393#define TRR28 0x1000 /* Deny But Don't Lock Access To Mailbox 28 */
394#define TRR29 0x2000 /* Deny But Don't Lock Access To Mailbox 29 */
395#define TRR30 0x4000 /* Deny But Don't Lock Access To Mailbox 30 */
396#define TRR31 0x8000 /* Deny But Don't Lock Access To Mailbox 31 */
397
398/* CAN_TRS1 Masks */
399#define TRS0 0x0001 /* Remote Frame Request For Mailbox 0 */
400#define TRS1 0x0002 /* Remote Frame Request For Mailbox 1 */
401#define TRS2 0x0004 /* Remote Frame Request For Mailbox 2 */
402#define TRS3 0x0008 /* Remote Frame Request For Mailbox 3 */
403#define TRS4 0x0010 /* Remote Frame Request For Mailbox 4 */
404#define TRS5 0x0020 /* Remote Frame Request For Mailbox 5 */
405#define TRS6 0x0040 /* Remote Frame Request For Mailbox 6 */
406#define TRS7 0x0080 /* Remote Frame Request For Mailbox 7 */
407#define TRS8 0x0100 /* Remote Frame Request For Mailbox 8 */
408#define TRS9 0x0200 /* Remote Frame Request For Mailbox 9 */
409#define TRS10 0x0400 /* Remote Frame Request For Mailbox 10 */
410#define TRS11 0x0800 /* Remote Frame Request For Mailbox 11 */
411#define TRS12 0x1000 /* Remote Frame Request For Mailbox 12 */
412#define TRS13 0x2000 /* Remote Frame Request For Mailbox 13 */
413#define TRS14 0x4000 /* Remote Frame Request For Mailbox 14 */
414#define TRS15 0x8000 /* Remote Frame Request For Mailbox 15 */
415
416/* CAN_TRS2 Masks */
417#define TRS16 0x0001 /* Remote Frame Request For Mailbox 16 */
418#define TRS17 0x0002 /* Remote Frame Request For Mailbox 17 */
419#define TRS18 0x0004 /* Remote Frame Request For Mailbox 18 */
420#define TRS19 0x0008 /* Remote Frame Request For Mailbox 19 */
421#define TRS20 0x0010 /* Remote Frame Request For Mailbox 20 */
422#define TRS21 0x0020 /* Remote Frame Request For Mailbox 21 */
423#define TRS22 0x0040 /* Remote Frame Request For Mailbox 22 */
424#define TRS23 0x0080 /* Remote Frame Request For Mailbox 23 */
425#define TRS24 0x0100 /* Remote Frame Request For Mailbox 24 */
426#define TRS25 0x0200 /* Remote Frame Request For Mailbox 25 */
427#define TRS26 0x0400 /* Remote Frame Request For Mailbox 26 */
428#define TRS27 0x0800 /* Remote Frame Request For Mailbox 27 */
429#define TRS28 0x1000 /* Remote Frame Request For Mailbox 28 */
430#define TRS29 0x2000 /* Remote Frame Request For Mailbox 29 */
431#define TRS30 0x4000 /* Remote Frame Request For Mailbox 30 */
432#define TRS31 0x8000 /* Remote Frame Request For Mailbox 31 */
433
434/* CAN_AA1 Masks */
435#define AA0 0x0001 /* Aborted Message In Mailbox 0 */
436#define AA1 0x0002 /* Aborted Message In Mailbox 1 */
437#define AA2 0x0004 /* Aborted Message In Mailbox 2 */
438#define AA3 0x0008 /* Aborted Message In Mailbox 3 */
439#define AA4 0x0010 /* Aborted Message In Mailbox 4 */
440#define AA5 0x0020 /* Aborted Message In Mailbox 5 */
441#define AA6 0x0040 /* Aborted Message In Mailbox 6 */
442#define AA7 0x0080 /* Aborted Message In Mailbox 7 */
443#define AA8 0x0100 /* Aborted Message In Mailbox 8 */
444#define AA9 0x0200 /* Aborted Message In Mailbox 9 */
445#define AA10 0x0400 /* Aborted Message In Mailbox 10 */
446#define AA11 0x0800 /* Aborted Message In Mailbox 11 */
447#define AA12 0x1000 /* Aborted Message In Mailbox 12 */
448#define AA13 0x2000 /* Aborted Message In Mailbox 13 */
449#define AA14 0x4000 /* Aborted Message In Mailbox 14 */
450#define AA15 0x8000 /* Aborted Message In Mailbox 15 */
451
452/* CAN_AA2 Masks */
453#define AA16 0x0001 /* Aborted Message In Mailbox 16 */
454#define AA17 0x0002 /* Aborted Message In Mailbox 17 */
455#define AA18 0x0004 /* Aborted Message In Mailbox 18 */
456#define AA19 0x0008 /* Aborted Message In Mailbox 19 */
457#define AA20 0x0010 /* Aborted Message In Mailbox 20 */
458#define AA21 0x0020 /* Aborted Message In Mailbox 21 */
459#define AA22 0x0040 /* Aborted Message In Mailbox 22 */
460#define AA23 0x0080 /* Aborted Message In Mailbox 23 */
461#define AA24 0x0100 /* Aborted Message In Mailbox 24 */
462#define AA25 0x0200 /* Aborted Message In Mailbox 25 */
463#define AA26 0x0400 /* Aborted Message In Mailbox 26 */
464#define AA27 0x0800 /* Aborted Message In Mailbox 27 */
465#define AA28 0x1000 /* Aborted Message In Mailbox 28 */
466#define AA29 0x2000 /* Aborted Message In Mailbox 29 */
467#define AA30 0x4000 /* Aborted Message In Mailbox 30 */
468#define AA31 0x8000 /* Aborted Message In Mailbox 31 */
469
470/* CAN_TA1 Masks */
471#define TA0 0x0001 /* Transmit Successful From Mailbox 0 */
472#define TA1 0x0002 /* Transmit Successful From Mailbox 1 */
473#define TA2 0x0004 /* Transmit Successful From Mailbox 2 */
474#define TA3 0x0008 /* Transmit Successful From Mailbox 3 */
475#define TA4 0x0010 /* Transmit Successful From Mailbox 4 */
476#define TA5 0x0020 /* Transmit Successful From Mailbox 5 */
477#define TA6 0x0040 /* Transmit Successful From Mailbox 6 */
478#define TA7 0x0080 /* Transmit Successful From Mailbox 7 */
479#define TA8 0x0100 /* Transmit Successful From Mailbox 8 */
480#define TA9 0x0200 /* Transmit Successful From Mailbox 9 */
481#define TA10 0x0400 /* Transmit Successful From Mailbox 10 */
482#define TA11 0x0800 /* Transmit Successful From Mailbox 11 */
483#define TA12 0x1000 /* Transmit Successful From Mailbox 12 */
484#define TA13 0x2000 /* Transmit Successful From Mailbox 13 */
485#define TA14 0x4000 /* Transmit Successful From Mailbox 14 */
486#define TA15 0x8000 /* Transmit Successful From Mailbox 15 */
487
488/* CAN_TA2 Masks */
489#define TA16 0x0001 /* Transmit Successful From Mailbox 16 */
490#define TA17 0x0002 /* Transmit Successful From Mailbox 17 */
491#define TA18 0x0004 /* Transmit Successful From Mailbox 18 */
492#define TA19 0x0008 /* Transmit Successful From Mailbox 19 */
493#define TA20 0x0010 /* Transmit Successful From Mailbox 20 */
494#define TA21 0x0020 /* Transmit Successful From Mailbox 21 */
495#define TA22 0x0040 /* Transmit Successful From Mailbox 22 */
496#define TA23 0x0080 /* Transmit Successful From Mailbox 23 */
497#define TA24 0x0100 /* Transmit Successful From Mailbox 24 */
498#define TA25 0x0200 /* Transmit Successful From Mailbox 25 */
499#define TA26 0x0400 /* Transmit Successful From Mailbox 26 */
500#define TA27 0x0800 /* Transmit Successful From Mailbox 27 */
501#define TA28 0x1000 /* Transmit Successful From Mailbox 28 */
502#define TA29 0x2000 /* Transmit Successful From Mailbox 29 */
503#define TA30 0x4000 /* Transmit Successful From Mailbox 30 */
504#define TA31 0x8000 /* Transmit Successful From Mailbox 31 */
505
506/* CAN_MBTD Masks */
507#define TDPTR 0x001F /* Mailbox To Temporarily Disable */
508#define TDA 0x0040 /* Temporary Disable Acknowledge */
509#define TDR 0x0080 /* Temporary Disable Request */
510
511/* CAN_RFH1 Masks */
512#define RFH0 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 0 */
513#define RFH1 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 1 */
514#define RFH2 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 2 */
515#define RFH3 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 3 */
516#define RFH4 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 4 */
517#define RFH5 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 5 */
518#define RFH6 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 6 */
519#define RFH7 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 7 */
520#define RFH8 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 8 */
521#define RFH9 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 9 */
522#define RFH10 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 10 */
523#define RFH11 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 11 */
524#define RFH12 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 12 */
525#define RFH13 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 13 */
526#define RFH14 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 14 */
527#define RFH15 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 15 */
528
529/* CAN_RFH2 Masks */
530#define RFH16 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 16 */
531#define RFH17 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 17 */
532#define RFH18 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 18 */
533#define RFH19 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 19 */
534#define RFH20 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 20 */
535#define RFH21 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 21 */
536#define RFH22 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 22 */
537#define RFH23 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 23 */
538#define RFH24 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 24 */
539#define RFH25 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 25 */
540#define RFH26 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 26 */
541#define RFH27 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 27 */
542#define RFH28 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 28 */
543#define RFH29 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 29 */
544#define RFH30 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 30 */
545#define RFH31 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 31 */
546
547/* CAN_MBTIF1 Masks */
548#define MBTIF0 0x0001 /* TX Interrupt Active In Mailbox 0 */
549#define MBTIF1 0x0002 /* TX Interrupt Active In Mailbox 1 */
550#define MBTIF2 0x0004 /* TX Interrupt Active In Mailbox 2 */
551#define MBTIF3 0x0008 /* TX Interrupt Active In Mailbox 3 */
552#define MBTIF4 0x0010 /* TX Interrupt Active In Mailbox 4 */
553#define MBTIF5 0x0020 /* TX Interrupt Active In Mailbox 5 */
554#define MBTIF6 0x0040 /* TX Interrupt Active In Mailbox 6 */
555#define MBTIF7 0x0080 /* TX Interrupt Active In Mailbox 7 */
556#define MBTIF8 0x0100 /* TX Interrupt Active In Mailbox 8 */
557#define MBTIF9 0x0200 /* TX Interrupt Active In Mailbox 9 */
558#define MBTIF10 0x0400 /* TX Interrupt Active In Mailbox 10 */
559#define MBTIF11 0x0800 /* TX Interrupt Active In Mailbox 11 */
560#define MBTIF12 0x1000 /* TX Interrupt Active In Mailbox 12 */
561#define MBTIF13 0x2000 /* TX Interrupt Active In Mailbox 13 */
562#define MBTIF14 0x4000 /* TX Interrupt Active In Mailbox 14 */
563#define MBTIF15 0x8000 /* TX Interrupt Active In Mailbox 15 */
564
565/* CAN_MBTIF2 Masks */
566#define MBTIF16 0x0001 /* TX Interrupt Active In Mailbox 16 */
567#define MBTIF17 0x0002 /* TX Interrupt Active In Mailbox 17 */
568#define MBTIF18 0x0004 /* TX Interrupt Active In Mailbox 18 */
569#define MBTIF19 0x0008 /* TX Interrupt Active In Mailbox 19 */
570#define MBTIF20 0x0010 /* TX Interrupt Active In Mailbox 20 */
571#define MBTIF21 0x0020 /* TX Interrupt Active In Mailbox 21 */
572#define MBTIF22 0x0040 /* TX Interrupt Active In Mailbox 22 */
573#define MBTIF23 0x0080 /* TX Interrupt Active In Mailbox 23 */
574#define MBTIF24 0x0100 /* TX Interrupt Active In Mailbox 24 */
575#define MBTIF25 0x0200 /* TX Interrupt Active In Mailbox 25 */
576#define MBTIF26 0x0400 /* TX Interrupt Active In Mailbox 26 */
577#define MBTIF27 0x0800 /* TX Interrupt Active In Mailbox 27 */
578#define MBTIF28 0x1000 /* TX Interrupt Active In Mailbox 28 */
579#define MBTIF29 0x2000 /* TX Interrupt Active In Mailbox 29 */
580#define MBTIF30 0x4000 /* TX Interrupt Active In Mailbox 30 */
581#define MBTIF31 0x8000 /* TX Interrupt Active In Mailbox 31 */
582
583/* CAN_MBRIF1 Masks */
584#define MBRIF0 0x0001 /* RX Interrupt Active In Mailbox 0 */
585#define MBRIF1 0x0002 /* RX Interrupt Active In Mailbox 1 */
586#define MBRIF2 0x0004 /* RX Interrupt Active In Mailbox 2 */
587#define MBRIF3 0x0008 /* RX Interrupt Active In Mailbox 3 */
588#define MBRIF4 0x0010 /* RX Interrupt Active In Mailbox 4 */
589#define MBRIF5 0x0020 /* RX Interrupt Active In Mailbox 5 */
590#define MBRIF6 0x0040 /* RX Interrupt Active In Mailbox 6 */
591#define MBRIF7 0x0080 /* RX Interrupt Active In Mailbox 7 */
592#define MBRIF8 0x0100 /* RX Interrupt Active In Mailbox 8 */
593#define MBRIF9 0x0200 /* RX Interrupt Active In Mailbox 9 */
594#define MBRIF10 0x0400 /* RX Interrupt Active In Mailbox 10 */
595#define MBRIF11 0x0800 /* RX Interrupt Active In Mailbox 11 */
596#define MBRIF12 0x1000 /* RX Interrupt Active In Mailbox 12 */
597#define MBRIF13 0x2000 /* RX Interrupt Active In Mailbox 13 */
598#define MBRIF14 0x4000 /* RX Interrupt Active In Mailbox 14 */
599#define MBRIF15 0x8000 /* RX Interrupt Active In Mailbox 15 */
600
601/* CAN_MBRIF2 Masks */
602#define MBRIF16 0x0001 /* RX Interrupt Active In Mailbox 16 */
603#define MBRIF17 0x0002 /* RX Interrupt Active In Mailbox 17 */
604#define MBRIF18 0x0004 /* RX Interrupt Active In Mailbox 18 */
605#define MBRIF19 0x0008 /* RX Interrupt Active In Mailbox 19 */
606#define MBRIF20 0x0010 /* RX Interrupt Active In Mailbox 20 */
607#define MBRIF21 0x0020 /* RX Interrupt Active In Mailbox 21 */
608#define MBRIF22 0x0040 /* RX Interrupt Active In Mailbox 22 */
609#define MBRIF23 0x0080 /* RX Interrupt Active In Mailbox 23 */
610#define MBRIF24 0x0100 /* RX Interrupt Active In Mailbox 24 */
611#define MBRIF25 0x0200 /* RX Interrupt Active In Mailbox 25 */
612#define MBRIF26 0x0400 /* RX Interrupt Active In Mailbox 26 */
613#define MBRIF27 0x0800 /* RX Interrupt Active In Mailbox 27 */
614#define MBRIF28 0x1000 /* RX Interrupt Active In Mailbox 28 */
615#define MBRIF29 0x2000 /* RX Interrupt Active In Mailbox 29 */
616#define MBRIF30 0x4000 /* RX Interrupt Active In Mailbox 30 */
617#define MBRIF31 0x8000 /* RX Interrupt Active In Mailbox 31 */
618
619/* CAN_MBIM1 Masks */
620#define MBIM0 0x0001 /* Enable Interrupt For Mailbox 0 */
621#define MBIM1 0x0002 /* Enable Interrupt For Mailbox 1 */
622#define MBIM2 0x0004 /* Enable Interrupt For Mailbox 2 */
623#define MBIM3 0x0008 /* Enable Interrupt For Mailbox 3 */
624#define MBIM4 0x0010 /* Enable Interrupt For Mailbox 4 */
625#define MBIM5 0x0020 /* Enable Interrupt For Mailbox 5 */
626#define MBIM6 0x0040 /* Enable Interrupt For Mailbox 6 */
627#define MBIM7 0x0080 /* Enable Interrupt For Mailbox 7 */
628#define MBIM8 0x0100 /* Enable Interrupt For Mailbox 8 */
629#define MBIM9 0x0200 /* Enable Interrupt For Mailbox 9 */
630#define MBIM10 0x0400 /* Enable Interrupt For Mailbox 10 */
631#define MBIM11 0x0800 /* Enable Interrupt For Mailbox 11 */
632#define MBIM12 0x1000 /* Enable Interrupt For Mailbox 12 */
633#define MBIM13 0x2000 /* Enable Interrupt For Mailbox 13 */
634#define MBIM14 0x4000 /* Enable Interrupt For Mailbox 14 */
635#define MBIM15 0x8000 /* Enable Interrupt For Mailbox 15 */
636
637/* CAN_MBIM2 Masks */
638#define MBIM16 0x0001 /* Enable Interrupt For Mailbox 16 */
639#define MBIM17 0x0002 /* Enable Interrupt For Mailbox 17 */
640#define MBIM18 0x0004 /* Enable Interrupt For Mailbox 18 */
641#define MBIM19 0x0008 /* Enable Interrupt For Mailbox 19 */
642#define MBIM20 0x0010 /* Enable Interrupt For Mailbox 20 */
643#define MBIM21 0x0020 /* Enable Interrupt For Mailbox 21 */
644#define MBIM22 0x0040 /* Enable Interrupt For Mailbox 22 */
645#define MBIM23 0x0080 /* Enable Interrupt For Mailbox 23 */
646#define MBIM24 0x0100 /* Enable Interrupt For Mailbox 24 */
647#define MBIM25 0x0200 /* Enable Interrupt For Mailbox 25 */
648#define MBIM26 0x0400 /* Enable Interrupt For Mailbox 26 */
649#define MBIM27 0x0800 /* Enable Interrupt For Mailbox 27 */
650#define MBIM28 0x1000 /* Enable Interrupt For Mailbox 28 */
651#define MBIM29 0x2000 /* Enable Interrupt For Mailbox 29 */
652#define MBIM30 0x4000 /* Enable Interrupt For Mailbox 30 */
653#define MBIM31 0x8000 /* Enable Interrupt For Mailbox 31 */
654
655/* CAN_GIM Masks */
656#define EWTIM 0x0001 /* Enable TX Error Count Interrupt */
657#define EWRIM 0x0002 /* Enable RX Error Count Interrupt */
658#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */
659#define BOIM 0x0008 /* Enable Bus Off Interrupt */
660#define WUIM 0x0010 /* Enable Wake-Up Interrupt */
661#define UIAIM 0x0020 /* Enable Access To Unimplemented Address Interrupt */
662#define AAIM 0x0040 /* Enable Abort Acknowledge Interrupt */
663#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */
664#define UCEIM 0x0100 /* Enable Universal Counter Overflow Interrupt */
665#define EXTIM 0x0200 /* Enable External Trigger Output Interrupt */
666#define ADIM 0x0400 /* Enable Access Denied Interrupt */
667
668/* CAN_GIS Masks */
669#define EWTIS 0x0001 /* TX Error Count IRQ Status */
670#define EWRIS 0x0002 /* RX Error Count IRQ Status */
671#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */
672#define BOIS 0x0008 /* Bus Off IRQ Status */
673#define WUIS 0x0010 /* Wake-Up IRQ Status */
674#define UIAIS 0x0020 /* Access To Unimplemented Address IRQ Status */
675#define AAIS 0x0040 /* Abort Acknowledge IRQ Status */
676#define RMLIS 0x0080 /* RX Message Lost IRQ Status */
677#define UCEIS 0x0100 /* Universal Counter Overflow IRQ Status */
678#define EXTIS 0x0200 /* External Trigger Output IRQ Status */
679#define ADIS 0x0400 /* Access Denied IRQ Status */
680
681/* CAN_GIF Masks */
682#define EWTIF 0x0001 /* TX Error Count IRQ Flag */
683#define EWRIF 0x0002 /* RX Error Count IRQ Flag */
684#define EPIF 0x0004 /* Error-Passive Mode IRQ Flag */
685#define BOIF 0x0008 /* Bus Off IRQ Flag */
686#define WUIF 0x0010 /* Wake-Up IRQ Flag */
687#define UIAIF 0x0020 /* Access To Unimplemented Address IRQ Flag */
688#define AAIF 0x0040 /* Abort Acknowledge IRQ Flag */
689#define RMLIF 0x0080 /* RX Message Lost IRQ Flag */
690#define UCEIF 0x0100 /* Universal Counter Overflow IRQ Flag */
691#define EXTIF 0x0200 /* External Trigger Output IRQ Flag */
692#define ADIF 0x0400 /* Access Denied IRQ Flag */
693
694/* CAN_UCCNF Masks */
695#define UCCNF 0x000F /* Universal Counter Mode */
696#define UC_STAMP 0x0001 /* Timestamp Mode */
697#define UC_WDOG 0x0002 /* Watchdog Mode */
698#define UC_AUTOTX 0x0003 /* Auto-Transmit Mode */
699#define UC_ERROR 0x0006 /* CAN Error Frame Count */
700#define UC_OVER 0x0007 /* CAN Overload Frame Count */
701#define UC_LOST 0x0008 /* Arbitration Lost During TX Count */
702#define UC_AA 0x0009 /* TX Abort Count */
703#define UC_TA 0x000A /* TX Successful Count */
704#define UC_REJECT 0x000B /* RX Message Rejected Count */
705#define UC_RML 0x000C /* RX Message Lost Count */
706#define UC_RX 0x000D /* Total Successful RX Messages Count */
707#define UC_RMP 0x000E /* Successful RX W/Matching ID Count */
708#define UC_ALL 0x000F /* Correct Message On CAN Bus Line Count */
709#define UCRC 0x0020 /* Universal Counter Reload/Clear */
710#define UCCT 0x0040 /* Universal Counter CAN Trigger */
711#define UCE 0x0080 /* Universal Counter Enable */
712
713/* CAN_ESR Masks */
714#define ACKE 0x0004 /* Acknowledge Error */
715#define SER 0x0008 /* Stuff Error */
716#define CRCE 0x0010 /* CRC Error */
717#define SA0 0x0020 /* Stuck At Dominant Error */
718#define BEF 0x0040 /* Bit Error Flag */
719#define FER 0x0080 /* Form Error Flag */
720
721/* CAN_EWR Masks */
722#define EWLREC 0x00FF /* RX Error Count Limit (For EWRIS) */
723#define EWLTEC 0xFF00 /* TX Error Count Limit (For EWTIS) */
724
725#endif
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index b558908e1c79..9626cf7e4251 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * bfin_sport.h - userspace header for bfin sport driver 2 * bfin_sport.h - interface to Blackfin SPORTs
3 * 3 *
4 * Copyright 2004-2008 Analog Devices Inc. 4 * Copyright 2004-2009 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -9,16 +9,6 @@
9#ifndef __BFIN_SPORT_H__ 9#ifndef __BFIN_SPORT_H__
10#define __BFIN_SPORT_H__ 10#define __BFIN_SPORT_H__
11 11
12#ifdef __KERNEL__
13#include <linux/cdev.h>
14#include <linux/mutex.h>
15#include <linux/sched.h>
16#include <linux/wait.h>
17#endif
18
19#define SPORT_MAJOR 237
20#define SPORT_NR_DEVS 2
21
22/* Sport mode: it can be set to TDM, i2s or others */ 12/* Sport mode: it can be set to TDM, i2s or others */
23#define NORM_MODE 0x0 13#define NORM_MODE 0x0
24#define TDM_MODE 0x1 14#define TDM_MODE 0x1
@@ -35,7 +25,7 @@ struct sport_config {
35 unsigned int mode:3; 25 unsigned int mode:3;
36 26
37 /* if TDM mode is selected, channels must be set */ 27 /* if TDM mode is selected, channels must be set */
38 int channels; /* Must be in 8 units */ 28 int channels; /* Must be in 8 units */
39 unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */ 29 unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */
40 30
41 /* I2S mode */ 31 /* I2S mode */
@@ -69,94 +59,137 @@ struct sport_config {
69 59
70#ifdef __KERNEL__ 60#ifdef __KERNEL__
71 61
72struct sport_register { 62#include <linux/types.h>
73 unsigned short tcr1;
74 unsigned short reserved0;
75 unsigned short tcr2;
76 unsigned short reserved1;
77 unsigned short tclkdiv;
78 unsigned short reserved2;
79 unsigned short tfsdiv;
80 unsigned short reserved3;
81 unsigned long tx;
82 unsigned long reserved_l0;
83 unsigned long rx;
84 unsigned long reserved_l1;
85 unsigned short rcr1;
86 unsigned short reserved4;
87 unsigned short rcr2;
88 unsigned short reserved5;
89 unsigned short rclkdiv;
90 unsigned short reserved6;
91 unsigned short rfsdiv;
92 unsigned short reserved7;
93 unsigned short stat;
94 unsigned short reserved8;
95 unsigned short chnl;
96 unsigned short reserved9;
97 unsigned short mcmc1;
98 unsigned short reserved10;
99 unsigned short mcmc2;
100 unsigned short reserved11;
101 unsigned long mtcs0;
102 unsigned long mtcs1;
103 unsigned long mtcs2;
104 unsigned long mtcs3;
105 unsigned long mrcs0;
106 unsigned long mrcs1;
107 unsigned long mrcs2;
108 unsigned long mrcs3;
109};
110
111struct sport_dev {
112 struct cdev cdev; /* Char device structure */
113
114 int sport_num;
115 63
116 int dma_rx_chan; 64/*
117 int dma_tx_chan; 65 * All Blackfin system MMRs are padded to 32bits even if the register
118 66 * itself is only 16bits. So use a helper macro to streamline this.
119 int rx_irq; 67 */
120 unsigned char *rx_buf; /* Buffer store the received data */ 68#define __BFP(m) u16 m; u16 __pad_##m
121 int rx_len; /* How many bytes will be received */ 69struct sport_register {
122 int rx_received; /* How many bytes has been received */ 70 __BFP(tcr1);
123 71 __BFP(tcr2);
124 int tx_irq; 72 __BFP(tclkdiv);
125 const unsigned char *tx_buf; 73 __BFP(tfsdiv);
126 int tx_len; 74 union {
127 int tx_sent; 75 u32 tx32;
128 76 u16 tx16;
129 int err_irq; 77 };
130 78 u32 __pad_tx;
131 struct mutex mutex; /* mutual exclusion semaphore */ 79 union {
132 struct task_struct *task; 80 u32 rx32; /* use the anomaly wrapper below */
133 81 u16 rx16;
134 wait_queue_head_t waitq; 82 };
135 int wait_con; 83 u32 __pad_rx;
136 struct sport_register *regs; 84 __BFP(rcr1);
137 struct sport_config config; 85 __BFP(rcr2);
86 __BFP(rclkdiv);
87 __BFP(rfsdiv);
88 __BFP(stat);
89 __BFP(chnl);
90 __BFP(mcmc1);
91 __BFP(mcmc2);
92 u32 mtcs0;
93 u32 mtcs1;
94 u32 mtcs2;
95 u32 mtcs3;
96 u32 mrcs0;
97 u32 mrcs1;
98 u32 mrcs2;
99 u32 mrcs3;
138}; 100};
101#undef __BFP
102
103#define bfin_read_sport_rx32(base) \
104({ \
105 struct sport_register *__mmrs = (void *)base; \
106 u32 __ret; \
107 unsigned long flags; \
108 if (ANOMALY_05000473) \
109 local_irq_save(flags); \
110 __ret = __mmrs->rx32; \
111 if (ANOMALY_05000473) \
112 local_irq_restore(flags); \
113 __ret; \
114})
139 115
140#endif 116#endif
141 117
142#define SPORT_TCR1 0 118/* Workaround defBF*.h SPORT MMRs till they get cleansed */
143#define SPORT_TCR2 1 119#undef DTYPE_NORM
144#define SPORT_TCLKDIV 2 120#undef SLEN
145#define SPORT_TFSDIV 3 121#undef SP_WOFF
146#define SPORT_RCR1 8 122#undef SP_WSIZE
147#define SPORT_RCR2 9 123
148#define SPORT_RCLKDIV 10 124/* SPORT_TCR1 Masks */
149#define SPORT_RFSDIV 11 125#define TSPEN 0x0001 /* TX enable */
150#define SPORT_CHANNEL 13 126#define ITCLK 0x0002 /* Internal TX Clock Select */
151#define SPORT_MCMC1 14 127#define TDTYPE 0x000C /* TX Data Formatting Select */
152#define SPORT_MCMC2 15 128#define DTYPE_NORM 0x0000 /* Data Format Normal */
153#define SPORT_MTCS0 16 129#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
154#define SPORT_MTCS1 17 130#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
155#define SPORT_MTCS2 18 131#define TLSBIT 0x0010 /* TX Bit Order */
156#define SPORT_MTCS3 19 132#define ITFS 0x0200 /* Internal TX Frame Sync Select */
157#define SPORT_MRCS0 20 133#define TFSR 0x0400 /* TX Frame Sync Required Select */
158#define SPORT_MRCS1 21 134#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
159#define SPORT_MRCS2 22 135#define LTFS 0x1000 /* Low TX Frame Sync Select */
160#define SPORT_MRCS3 23 136#define LATFS 0x2000 /* Late TX Frame Sync Select */
137#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
138
139/* SPORT_TCR2 Masks */
140#define SLEN 0x001F /* SPORT TX Word Length (2 - 31) */
141#define DP_SLEN(x) BFIN_DEPOSIT(SLEN, x)
142#define EX_SLEN(x) BFIN_EXTRACT(SLEN, x)
143#define TXSE 0x0100 /* TX Secondary Enable */
144#define TSFSE 0x0200 /* TX Stereo Frame Sync Enable */
145#define TRFST 0x0400 /* TX Right-First Data Order */
146
147/* SPORT_RCR1 Masks */
148#define RSPEN 0x0001 /* RX enable */
149#define IRCLK 0x0002 /* Internal RX Clock Select */
150#define RDTYPE 0x000C /* RX Data Formatting Select */
151/* DTYPE_* defined above */
152#define RLSBIT 0x0010 /* RX Bit Order */
153#define IRFS 0x0200 /* Internal RX Frame Sync Select */
154#define RFSR 0x0400 /* RX Frame Sync Required Select */
155#define LRFS 0x1000 /* Low RX Frame Sync Select */
156#define LARFS 0x2000 /* Late RX Frame Sync Select */
157#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
158
159/* SPORT_RCR2 Masks */
160/* SLEN defined above */
161#define RXSE 0x0100 /* RX Secondary Enable */
162#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
163#define RRFST 0x0400 /* Right-First Data Order */
164
165/* SPORT_STAT Masks */
166#define RXNE 0x0001 /* RX FIFO Not Empty Status */
167#define RUVF 0x0002 /* RX Underflow Status */
168#define ROVF 0x0004 /* RX Overflow Status */
169#define TXF 0x0008 /* TX FIFO Full Status */
170#define TUVF 0x0010 /* TX Underflow Status */
171#define TOVF 0x0020 /* TX Overflow Status */
172#define TXHRE 0x0040 /* TX Hold Register Empty */
173
174/* SPORT_MCMC1 Masks */
175#define SP_WOFF 0x03FF /* Multichannel Window Offset Field */
176#define DP_SP_WOFF(x) BFIN_DEPOSIT(SP_WOFF, x)
177#define EX_SP_WOFF(x) BFIN_EXTRACT(SP_WOFF, x)
178#define SP_WSIZE 0xF000 /* Multichannel Window Size Field */
179#define DP_SP_WSIZE(x) BFIN_DEPOSIT(SP_WSIZE, x)
180#define EX_SP_WSIZE(x) BFIN_EXTRACT(SP_WSIZE, x)
181
182/* SPORT_MCMC2 Masks */
183#define MCCRM 0x0003 /* Multichannel Clock Recovery Mode */
184#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
185#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
186#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
187#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
188#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
189#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
190#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
191#define MFD 0xF000 /* Multichannel Frame Delay */
192#define DP_MFD(x) BFIN_DEPOSIT(MFD, x)
193#define EX_MFD(x) BFIN_EXTRACT(MFD, x)
161 194
162#endif 195#endif
diff --git a/arch/blackfin/include/asm/bfin_watchdog.h b/arch/blackfin/include/asm/bfin_watchdog.h
new file mode 100644
index 000000000000..dce09829a095
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_watchdog.h
@@ -0,0 +1,30 @@
1/*
2 * bfin_watchdog.h - Blackfin watchdog definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef _BFIN_WATCHDOG_H
10#define _BFIN_WATCHDOG_H
11
12/* Bit in SWRST that indicates boot caused by watchdog */
13#define SWRST_RESET_WDOG 0x4000
14
15/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */
16#define WDOG_EXPIRED 0x8000
17
18/* Masks for WDEV field in WDOG_CTL register */
19#define ICTL_RESET 0x0
20#define ICTL_NMI 0x2
21#define ICTL_GPI 0x4
22#define ICTL_NONE 0x6
23#define ICTL_MASK 0x6
24
25/* Masks for WDEN field in WDOG_CTL register */
26#define WDEN_MASK 0x0FF0
27#define WDEN_ENABLE 0x0000
28#define WDEN_DISABLE 0x0AD0
29
30#endif
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index a2ff3fb3568d..605ba8e9b2e4 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -7,22 +7,41 @@
7#ifndef _BLACKFIN_BITOPS_H 7#ifndef _BLACKFIN_BITOPS_H
8#define _BLACKFIN_BITOPS_H 8#define _BLACKFIN_BITOPS_H
9 9
10#ifndef CONFIG_SMP 10#include <linux/compiler.h>
11# include <asm-generic/bitops.h> 11
12#else 12#include <asm-generic/bitops/__ffs.h>
13#include <asm-generic/bitops/ffz.h>
14#include <asm-generic/bitops/fls.h>
15#include <asm-generic/bitops/__fls.h>
16#include <asm-generic/bitops/fls64.h>
17#include <asm-generic/bitops/find.h>
13 18
14#ifndef _LINUX_BITOPS_H 19#ifndef _LINUX_BITOPS_H
15#error only <linux/bitops.h> can be included directly 20#error only <linux/bitops.h> can be included directly
16#endif 21#endif
17 22
18#include <linux/compiler.h>
19#include <asm/byteorder.h> /* swab32 */
20
21#include <asm-generic/bitops/ffs.h>
22#include <asm-generic/bitops/__ffs.h>
23#include <asm-generic/bitops/sched.h> 23#include <asm-generic/bitops/sched.h>
24#include <asm-generic/bitops/ffz.h> 24#include <asm-generic/bitops/ffs.h>
25#include <asm-generic/bitops/lock.h>
26#include <asm-generic/bitops/ext2-non-atomic.h>
27#include <asm-generic/bitops/ext2-atomic.h>
28#include <asm-generic/bitops/minix.h>
29
30#ifndef CONFIG_SMP
31#include <linux/irqflags.h>
32
33/*
34 * clear_bit may not imply a memory barrier
35 */
36#ifndef smp_mb__before_clear_bit
37#define smp_mb__before_clear_bit() smp_mb()
38#define smp_mb__after_clear_bit() smp_mb()
39#endif
40#include <asm-generic/bitops/atomic.h>
41#include <asm-generic/bitops/non-atomic.h>
42#else
25 43
44#include <asm/byteorder.h> /* swab32 */
26#include <linux/linkage.h> 45#include <linux/linkage.h>
27 46
28asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr); 47asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
@@ -89,19 +108,36 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
89 108
90#include <asm-generic/bitops/non-atomic.h> 109#include <asm-generic/bitops/non-atomic.h>
91 110
92#include <asm-generic/bitops/find.h> 111#endif /* CONFIG_SMP */
93#include <asm-generic/bitops/hweight.h>
94#include <asm-generic/bitops/lock.h>
95 112
96#include <asm-generic/bitops/ext2-atomic.h> 113/*
97#include <asm-generic/bitops/ext2-non-atomic.h> 114 * hweightN: returns the hamming weight (i.e. the number
115 * of bits set) of a N-bit word
116 */
98 117
99#include <asm-generic/bitops/minix.h> 118static inline unsigned int hweight32(unsigned int w)
119{
120 unsigned int res;
100 121
101#include <asm-generic/bitops/fls.h> 122 __asm__ ("%0.l = ONES %0;"
102#include <asm-generic/bitops/__fls.h> 123 "%0 = %0.l (Z);"
103#include <asm-generic/bitops/fls64.h> 124 : "=d" (res) : "d" (w));
125 return res;
126}
104 127
105#endif /* CONFIG_SMP */ 128static inline unsigned int hweight64(__u64 w)
129{
130 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
131}
132
133static inline unsigned int hweight16(unsigned int w)
134{
135 return hweight32(w & 0xffff);
136}
137
138static inline unsigned int hweight8(unsigned int w)
139{
140 return hweight32(w & 0xff);
141}
106 142
107#endif /* _BLACKFIN_BITOPS_H */ 143#endif /* _BLACKFIN_BITOPS_H */
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S
index 5dffaf582a22..1f9060395a0a 100644
--- a/arch/blackfin/include/asm/context.S
+++ b/arch/blackfin/include/asm/context.S
@@ -73,6 +73,11 @@
73#else 73#else
74 cli r0; 74 cli r0;
75#endif 75#endif
76#ifdef CONFIG_TRACE_IRQFLAGS
77 sp += -12;
78 call _trace_hardirqs_off;
79 sp += 12;
80#endif
76 [--sp] = RETI; /*orig_pc*/ 81 [--sp] = RETI; /*orig_pc*/
77 /* Clear all L registers. */ 82 /* Clear all L registers. */
78 r0 = 0 (x); 83 r0 = 0 (x);
@@ -279,6 +284,13 @@
279 RETN = [sp++]; 284 RETN = [sp++];
280 RETX = [sp++]; 285 RETX = [sp++];
281 RETI = [sp++]; 286 RETI = [sp++];
287
288#ifdef CONFIG_TRACE_IRQFLAGS
289 sp += -12;
290 call _trace_hardirqs_on;
291 sp += 12;
292#endif
293
282 RETS = [sp++]; 294 RETS = [sp++];
283 295
284#ifdef CONFIG_SMP 296#ifdef CONFIG_SMP
@@ -374,3 +386,13 @@
374 386
375 (R7:0, P5:0) = [SP++]; 387 (R7:0, P5:0) = [SP++];
376.endm 388.endm
389
390.macro pseudo_long_call func:req, scratch:req
391#ifdef CONFIG_ROMKERNEL
392 \scratch\().l = \func;
393 \scratch\().h = \func;
394 call (\scratch);
395#else
396 call \func;
397#endif
398.endm
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index b191dc662bd8..16883e582e3c 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -17,8 +17,6 @@ struct blackfin_cpudata {
17 struct task_struct *idle; 17 struct task_struct *idle;
18 unsigned int imemctl; 18 unsigned int imemctl;
19 unsigned int dmemctl; 19 unsigned int dmemctl;
20 unsigned long dcache_invld_count;
21 unsigned long icache_invld_count;
22}; 20};
23 21
24DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); 22DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index 25906468622f..f342ff0319df 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -12,6 +12,8 @@
12#include <mach/anomaly.h> 12#include <mach/anomaly.h>
13 13
14#define MK_BMSK_(x) (1<<x) 14#define MK_BMSK_(x) (1<<x)
15#define BFIN_DEPOSIT(mask, x) (((x) << __ffs(mask)) & (mask))
16#define BFIN_EXTRACT(mask, x) (((x) & (mask)) >> __ffs(mask))
15 17
16#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
17 19
@@ -23,62 +25,30 @@
23# define NOP_PAD_ANOMALY_05000198 25# define NOP_PAD_ANOMALY_05000198
24#endif 26#endif
25 27
26#define bfin_read8(addr) ({ \ 28#define _bfin_readX(addr, size, asm_size, asm_ext) ({ \
27 uint32_t __v; \ 29 u32 __v; \
28 __asm__ __volatile__( \ 30 __asm__ __volatile__( \
29 NOP_PAD_ANOMALY_05000198 \ 31 NOP_PAD_ANOMALY_05000198 \
30 "%0 = b[%1] (z);" \ 32 "%0 = " #asm_size "[%1]" #asm_ext ";" \
31 : "=d" (__v) \ 33 : "=d" (__v) \
32 : "a" (addr) \ 34 : "a" (addr) \
33 ); \ 35 ); \
34 __v; }) 36 __v; })
35 37#define _bfin_writeX(addr, val, size, asm_size) \
36#define bfin_read16(addr) ({ \
37 uint32_t __v; \
38 __asm__ __volatile__( \
39 NOP_PAD_ANOMALY_05000198 \
40 "%0 = w[%1] (z);" \
41 : "=d" (__v) \
42 : "a" (addr) \
43 ); \
44 __v; })
45
46#define bfin_read32(addr) ({ \
47 uint32_t __v; \
48 __asm__ __volatile__( \
49 NOP_PAD_ANOMALY_05000198 \
50 "%0 = [%1];" \
51 : "=d" (__v) \
52 : "a" (addr) \
53 ); \
54 __v; })
55
56#define bfin_write8(addr, val) \
57 __asm__ __volatile__( \ 38 __asm__ __volatile__( \
58 NOP_PAD_ANOMALY_05000198 \ 39 NOP_PAD_ANOMALY_05000198 \
59 "b[%0] = %1;" \ 40 #asm_size "[%0] = %1;" \
60 : \ 41 : \
61 : "a" (addr), "d" ((uint8_t)(val)) \ 42 : "a" (addr), "d" ((u##size)(val)) \
62 : "memory" \ 43 : "memory" \
63 ) 44 )
64 45
65#define bfin_write16(addr, val) \ 46#define bfin_read8(addr) _bfin_readX(addr, 8, b, (z))
66 __asm__ __volatile__( \ 47#define bfin_read16(addr) _bfin_readX(addr, 16, w, (z))
67 NOP_PAD_ANOMALY_05000198 \ 48#define bfin_read32(addr) _bfin_readX(addr, 32, , )
68 "w[%0] = %1;" \ 49#define bfin_write8(addr, val) _bfin_writeX(addr, val, 8, b)
69 : \ 50#define bfin_write16(addr, val) _bfin_writeX(addr, val, 16, w)
70 : "a" (addr), "d" ((uint16_t)(val)) \ 51#define bfin_write32(addr, val) _bfin_writeX(addr, val, 32, )
71 : "memory" \
72 )
73
74#define bfin_write32(addr, val) \
75 __asm__ __volatile__( \
76 NOP_PAD_ANOMALY_05000198 \
77 "[%0] = %1;" \
78 : \
79 : "a" (addr), "d" (val) \
80 : "memory" \
81 )
82 52
83#endif /* __ASSEMBLY__ */ 53#endif /* __ASSEMBLY__ */
84 54
diff --git a/arch/blackfin/include/asm/delay.h b/arch/blackfin/include/asm/delay.h
index c31f91cc1d5d..171d8deb04a5 100644
--- a/arch/blackfin/include/asm/delay.h
+++ b/arch/blackfin/include/asm/delay.h
@@ -30,10 +30,22 @@ __asm__ __volatile__ (
30 30
31#define HZSCALE (268435456 / (1000000/HZ)) 31#define HZSCALE (268435456 / (1000000/HZ))
32 32
33static inline void udelay(unsigned long usecs) 33static inline unsigned long __to_delay(unsigned long scale)
34{ 34{
35 extern unsigned long loops_per_jiffy; 35 extern unsigned long loops_per_jiffy;
36 __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6); 36 return (((scale * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6;
37}
38
39static inline void udelay(unsigned long usecs)
40{
41 __delay(__to_delay(usecs));
37} 42}
38 43
44static inline void ndelay(unsigned long nsecs)
45{
46 __delay(__to_delay(1) * nsecs / 1000);
47}
48
49#define ndelay ndelay
50
39#endif 51#endif
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 413a30314a6f..212cb80fd74b 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -44,13 +44,8 @@ dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
44extern void 44extern void
45__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir); 45__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
46static inline void 46static inline void
47_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) 47__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir)
48{ 48{
49 if (!__builtin_constant_p(dir)) {
50 __dma_sync(addr, size, dir);
51 return;
52 }
53
54 switch (dir) { 49 switch (dir) {
55 case DMA_NONE: 50 case DMA_NONE:
56 BUG(); 51 BUG();
@@ -64,6 +59,14 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
64 break; 59 break;
65 } 60 }
66} 61}
62static inline void
63_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
64{
65 if (__builtin_constant_p(dir))
66 __dma_sync_inline(addr, size, dir);
67 else
68 __dma_sync(addr, size, dir);
69}
67 70
68static inline dma_addr_t 71static inline dma_addr_t
69dma_map_single(struct device *dev, void *ptr, size_t size, 72dma_map_single(struct device *dev, void *ptr, size_t size,
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index bd2e62243abe..2c09b1d50ec9 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -262,6 +262,10 @@ static inline void dma_disable_irq(unsigned int channel)
262{ 262{
263 disable_irq(dma_ch[channel].irq); 263 disable_irq(dma_ch[channel].irq);
264} 264}
265static inline void dma_disable_irq_nosync(unsigned int channel)
266{
267 disable_irq_nosync(dma_ch[channel].irq);
268}
265static inline void dma_enable_irq(unsigned int channel) 269static inline void dma_enable_irq(unsigned int channel)
266{ 270{
267 enable_irq(dma_ch[channel].irq); 271 enable_irq(dma_ch[channel].irq);
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index 1597ae5041ee..efcc3aebeae4 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -75,7 +75,7 @@
75 75
76#define VLEV 0x00F0 /* Internal Voltage Level */ 76#define VLEV 0x00F0 /* Internal Voltage Level */
77#ifdef __ADSPBF52x__ 77#ifdef __ADSPBF52x__
78#define VLEV_085 0x0040 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ 78#define VLEV_085 0x0040 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
79#define VLEV_090 0x0050 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ 79#define VLEV_090 0x0050 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
80#define VLEV_095 0x0060 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ 80#define VLEV_095 0x0060 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
81#define VLEV_100 0x0070 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ 81#define VLEV_100 0x0070 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
@@ -84,7 +84,7 @@
84#define VLEV_115 0x00A0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ 84#define VLEV_115 0x00A0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
85#define VLEV_120 0x00B0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ 85#define VLEV_120 0x00B0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
86#else 86#else
87#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ 87#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
88#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ 88#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
89#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ 89#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
90#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ 90#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index 5b50f0ecacf8..117713adea7f 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -22,12 +22,15 @@
22#define EF_BFIN_CODE_IN_L2 0x00000040 /* --code-in-l2 */ 22#define EF_BFIN_CODE_IN_L2 0x00000040 /* --code-in-l2 */
23#define EF_BFIN_DATA_IN_L2 0x00000080 /* --data-in-l2 */ 23#define EF_BFIN_DATA_IN_L2 0x00000080 /* --data-in-l2 */
24 24
25#if 1 /* core dumps not supported, but linux/elfcore.h needs these */
25typedef unsigned long elf_greg_t; 26typedef unsigned long elf_greg_t;
26 27
27#define ELF_NGREG 40 /* (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) */ 28#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
28typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 29typedef elf_greg_t elf_gregset_t[ELF_NGREG];
29 30
30typedef struct { } elf_fpregset_t; 31typedef struct { } elf_fpregset_t;
32#endif
33
31/* 34/*
32 * This is used to ensure we don't load something for the wrong architecture. 35 * This is used to ensure we don't load something for the wrong architecture.
33 */ 36 */
@@ -55,6 +58,9 @@ do { \
55 _regs->p2 = _dynamic_addr; \ 58 _regs->p2 = _dynamic_addr; \
56} while(0) 59} while(0)
57 60
61#if 0
62#define CORE_DUMP_USE_REGSET
63#endif
58#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC 64#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
59#define ELF_EXEC_PAGESIZE 4096 65#define ELF_EXEC_PAGESIZE 4096
60 66
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 90c9b400ba6d..4cfe2d9ba7e8 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -10,4 +10,57 @@
10 10
11#define MCOUNT_INSN_SIZE 6 /* sizeof "[++sp] = rets; call __mcount;" */ 11#define MCOUNT_INSN_SIZE 6 /* sizeof "[++sp] = rets; call __mcount;" */
12 12
13#ifndef __ASSEMBLY__
14
15#ifdef CONFIG_FRAME_POINTER
16#include <linux/mm.h>
17
18extern inline void *return_address(unsigned int level)
19{
20 unsigned long *endstack, *fp, *ret_addr;
21 unsigned int current_level = 0;
22
23 if (level == 0)
24 return __builtin_return_address(0);
25
26 fp = (unsigned long *)__builtin_frame_address(0);
27 endstack = (unsigned long *)PAGE_ALIGN((unsigned long)&level);
28
29 while (((unsigned long)fp & 0x3) == 0 && fp &&
30 (fp + 1) < endstack && current_level < level) {
31 fp = (unsigned long *)*fp;
32 current_level++;
33 }
34
35 if (((unsigned long)fp & 0x3) == 0 && fp &&
36 (fp + 1) < endstack)
37 ret_addr = (unsigned long *)*(fp + 1);
38 else
39 ret_addr = NULL;
40
41 return ret_addr;
42}
43
44#else
45
46extern inline void *return_address(unsigned int level)
47{
48 return NULL;
49}
50
51#endif /* CONFIG_FRAME_POINTER */
52
53#define HAVE_ARCH_CALLER_ADDR
54
55/* inline function or macro may lead to unexpected result */
56#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
57#define CALLER_ADDR1 ((unsigned long)return_address(1))
58#define CALLER_ADDR2 ((unsigned long)return_address(2))
59#define CALLER_ADDR3 ((unsigned long)return_address(3))
60#define CALLER_ADDR4 ((unsigned long)return_address(4))
61#define CALLER_ADDR5 ((unsigned long)return_address(5))
62#define CALLER_ADDR6 ((unsigned long)return_address(6))
63
64#endif /* __ASSEMBLY__ */
65
13#endif 66#endif
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 539468a05057..91bd2d7b9d55 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -70,6 +70,8 @@
70 70
71#ifndef __ASSEMBLY__ 71#ifndef __ASSEMBLY__
72 72
73#include <linux/compiler.h>
74
73/*********************************************************** 75/***********************************************************
74* 76*
75* FUNCTIONS: Blackfin General Purpose Ports Access Functions 77* FUNCTIONS: Blackfin General Purpose Ports Access Functions
@@ -223,6 +225,9 @@ int bfin_gpio_direction_output(unsigned gpio, int value);
223int bfin_gpio_get_value(unsigned gpio); 225int bfin_gpio_get_value(unsigned gpio);
224void bfin_gpio_set_value(unsigned gpio, int value); 226void bfin_gpio_set_value(unsigned gpio, int value);
225 227
228#include <asm/irq.h>
229#include <asm/errno.h>
230
226#ifdef CONFIG_GPIOLIB 231#ifdef CONFIG_GPIOLIB
227#include <asm-generic/gpio.h> /* cansleep wrappers */ 232#include <asm-generic/gpio.h> /* cansleep wrappers */
228 233
@@ -247,6 +252,11 @@ static inline int gpio_cansleep(unsigned int gpio)
247 return __gpio_cansleep(gpio); 252 return __gpio_cansleep(gpio);
248} 253}
249 254
255static inline int gpio_to_irq(unsigned gpio)
256{
257 return __gpio_to_irq(gpio);
258}
259
250#else /* !CONFIG_GPIOLIB */ 260#else /* !CONFIG_GPIOLIB */
251 261
252static inline int gpio_request(unsigned gpio, const char *label) 262static inline int gpio_request(unsigned gpio, const char *label)
@@ -279,10 +289,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
279 return bfin_gpio_set_value(gpio, value); 289 return bfin_gpio_set_value(gpio, value);
280} 290}
281 291
282#include <asm-generic/gpio.h> /* cansleep wrappers */
283#endif /* !CONFIG_GPIOLIB */
284#include <asm/irq.h>
285
286static inline int gpio_to_irq(unsigned gpio) 292static inline int gpio_to_irq(unsigned gpio)
287{ 293{
288 if (likely(gpio < MAX_BLACKFIN_GPIOS)) 294 if (likely(gpio < MAX_BLACKFIN_GPIOS))
@@ -291,6 +297,9 @@ static inline int gpio_to_irq(unsigned gpio)
291 return -EINVAL; 297 return -EINVAL;
292} 298}
293 299
300#include <asm-generic/gpio.h> /* cansleep wrappers */
301#endif /* !CONFIG_GPIOLIB */
302
294static inline int irq_to_gpio(unsigned irq) 303static inline int irq_to_gpio(unsigned irq)
295{ 304{
296 return (irq - GPIO_IRQ_BASE); 305 return (irq - GPIO_IRQ_BASE);
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index e7c0623f9091..12f4060a31b0 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -12,6 +12,9 @@
12 12
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14 14
15/* IRQs that may be used by external irq_chip controllers */
16#define NR_SPARE_IRQS 32
17
15#include <mach/anomaly.h> 18#include <mach/anomaly.h>
16 19
17/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */ 20/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
@@ -35,4 +38,8 @@
35 38
36#include <asm-generic/irq.h> 39#include <asm-generic/irq.h>
37 40
41#ifdef CONFIG_NMI_WATCHDOG
42# define ARCH_HAS_NMI_WATCHDOG
43#endif
44
38#endif /* _BFIN_IRQ_H_ */ 45#endif /* _BFIN_IRQ_H_ */
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index ae8ef4ffd806..7f363d7e43a5 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/pgalloc.h> 14#include <asm/pgalloc.h>
15#include <asm/cplbinit.h> 15#include <asm/cplbinit.h>
16#include <asm/sections.h>
16 17
17/* Note: L1 stacks are CPU-private things, so we bluntly disable this 18/* Note: L1 stacks are CPU-private things, so we bluntly disable this
18 feature in SMP mode, and use the per-CPU scratch SRAM bank only to 19 feature in SMP mode, and use the per-CPU scratch SRAM bank only to
@@ -117,9 +118,16 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
117 unsigned long flags) 118 unsigned long flags)
118{ 119{
119 unsigned long *mask = mm->context.page_rwx_mask; 120 unsigned long *mask = mm->context.page_rwx_mask;
120 unsigned long page = addr >> 12; 121 unsigned long page;
121 unsigned long idx = page >> 5; 122 unsigned long idx;
122 unsigned long bit = 1 << (page & 31); 123 unsigned long bit;
124
125 if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
126 page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
127 else
128 page = addr >> 12;
129 idx = page >> 5;
130 bit = 1 << (page & 31);
123 131
124 if (flags & VM_READ) 132 if (flags & VM_READ)
125 mask[idx] |= bit; 133 mask[idx] |= bit;
diff --git a/arch/blackfin/include/asm/nmi.h b/arch/blackfin/include/asm/nmi.h
new file mode 100644
index 000000000000..b9caac4fcfd8
--- /dev/null
+++ b/arch/blackfin/include/asm/nmi.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright 2010 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2
5 */
6
7#ifndef _BFIN_NMI_H_
8#define _BFIN_NMI_H_
9
10#include <linux/nmi.h>
11
12#endif
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index 1d04e4078340..d0ce975bcd48 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -15,4 +15,7 @@
15 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ 15 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
16 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 16 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
17 17
18#include <asm-generic/memory_model.h>
19#include <asm-generic/getorder.h>
20
18#endif 21#endif
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index b33a4488f498..aaa1c6c2bc19 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -24,6 +24,8 @@
24 24
25#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
26 26
27struct task_struct;
28
27/* this struct defines the way the registers are stored on the 29/* this struct defines the way the registers are stored on the
28 stack during a system call. */ 30 stack during a system call. */
29 31
@@ -101,9 +103,30 @@ struct pt_regs {
101 master interrupt enable. */ 103 master interrupt enable. */
102#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) 104#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
103#define instruction_pointer(regs) ((regs)->pc) 105#define instruction_pointer(regs) ((regs)->pc)
106#define user_stack_pointer(regs) ((regs)->usp)
104#define profile_pc(regs) instruction_pointer(regs) 107#define profile_pc(regs) instruction_pointer(regs)
105extern void show_regs(struct pt_regs *); 108extern void show_regs(struct pt_regs *);
106 109
110#define arch_has_single_step() (1)
111extern void user_enable_single_step(struct task_struct *child);
112extern void user_disable_single_step(struct task_struct *child);
113/* common code demands this function */
114#define ptrace_disable(child) user_disable_single_step(child)
115
116/*
117 * Get the address of the live pt_regs for the specified task.
118 * These are saved onto the top kernel stack when the process
119 * is not running.
120 *
121 * Note: if a user thread is execve'd from kernel space, the
122 * kernel stack will not be empty on entry to the kernel, so
123 * ptracing these tasks will fail.
124 */
125#define task_pt_regs(task) \
126 (struct pt_regs *) \
127 ((unsigned long)task_stack_page(task) + \
128 (THREAD_SIZE - sizeof(struct pt_regs)))
129
107#endif /* __KERNEL__ */ 130#endif /* __KERNEL__ */
108 131
109#endif /* __ASSEMBLY__ */ 132#endif /* __ASSEMBLY__ */
@@ -173,4 +196,6 @@ extern void show_regs(struct pt_regs *);
173#define PT_FDPIC_EXEC 232 196#define PT_FDPIC_EXEC 232
174#define PT_FDPIC_INTERP 236 197#define PT_FDPIC_INTERP 236
175 198
199#define PT_LAST_PSEUDO PT_FDPIC_INTERP
200
176#endif /* _BFIN_PTRACE_H */ 201#endif /* _BFIN_PTRACE_H */
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 42f6c53c59c6..14a3e66d9167 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -21,6 +21,9 @@ extern unsigned long memory_start, memory_end, physical_mem_end;
21extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[]; 21extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[];
22extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[], 22extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
23 _data_l1_lma[], __weak _data_l1_len[]; 23 _data_l1_lma[], __weak _data_l1_len[];
24#ifdef CONFIG_ROMKERNEL
25extern char _data_lma[], _data_len[], _sinitdata[], _einitdata[], _init_data_lma[], _init_data_len[];
26#endif
24extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], 27extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
25 _data_b_l1_lma[], __weak _data_b_l1_len[]; 28 _data_b_l1_lma[], __weak _data_b_l1_len[];
26extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], 29extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index 6a0fe94b84a6..f5b537967116 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -22,8 +22,23 @@ extern char coreb_trampoline_start, coreb_trampoline_end;
22struct corelock_slot { 22struct corelock_slot {
23 int lock; 23 int lock;
24}; 24};
25extern struct corelock_slot corelock;
26
27#ifdef __ARCH_SYNC_CORE_ICACHE
28extern unsigned long icache_invld_count[NR_CPUS];
29#endif
30#ifdef __ARCH_SYNC_CORE_DCACHE
31extern unsigned long dcache_invld_count[NR_CPUS];
32#endif
25 33
26void smp_icache_flush_range_others(unsigned long start, 34void smp_icache_flush_range_others(unsigned long start,
27 unsigned long end); 35 unsigned long end);
36#ifdef CONFIG_HOTPLUG_CPU
37void coreb_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
38void cpu_die(void);
39void platform_cpu_die(void);
40int __cpu_disable(void);
41int __cpu_die(unsigned int cpu);
42#endif
28 43
29#endif /* !__ASM_BLACKFIN_SMP_H */ 44#endif /* !__ASM_BLACKFIN_SMP_H */
diff --git a/arch/blackfin/include/asm/syscall.h b/arch/blackfin/include/asm/syscall.h
new file mode 100644
index 000000000000..4921a4815cce
--- /dev/null
+++ b/arch/blackfin/include/asm/syscall.h
@@ -0,0 +1,96 @@
1/*
2 * Magic syscall break down functions
3 *
4 * Copyright 2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __ASM_BLACKFIN_SYSCALL_H__
10#define __ASM_BLACKFIN_SYSCALL_H__
11
12/*
13 * Blackfin syscalls are simple:
14 * enter:
15 * p0: syscall number
16 * r{0,1,2,3,4,5}: syscall args 0,1,2,3,4,5
17 * exit:
18 * r0: return/error value
19 */
20
21#include <linux/err.h>
22#include <linux/sched.h>
23#include <asm/ptrace.h>
24
25static inline long
26syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
27{
28 return regs->p0;
29}
30
31static inline void
32syscall_rollback(struct task_struct *task, struct pt_regs *regs)
33{
34 regs->p0 = regs->orig_p0;
35}
36
37static inline long
38syscall_get_error(struct task_struct *task, struct pt_regs *regs)
39{
40 return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
41}
42
43static inline long
44syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
45{
46 return regs->r0;
47}
48
49static inline void
50syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
51 int error, long val)
52{
53 regs->r0 = error ? -error : val;
54}
55
56/**
57 * syscall_get_arguments()
58 * @task: unused
59 * @regs: the register layout to extract syscall arguments from
60 * @i: first syscall argument to extract
61 * @n: number of syscall arguments to extract
62 * @args: array to return the syscall arguments in
63 *
64 * args[0] gets i'th argument, args[n - 1] gets the i+n-1'th argument
65 */
66static inline void
67syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
68 unsigned int i, unsigned int n, unsigned long *args)
69{
70 /*
71 * Assume the ptrace layout doesn't change -- r5 is first in memory,
72 * then r4, ..., then r0. So we simply reverse the ptrace register
73 * array in memory to store into the args array.
74 */
75 long *aregs = &regs->r0 - i;
76
77 BUG_ON(i > 5 || i + n > 6);
78
79 while (n--)
80 *args++ = *aregs--;
81}
82
83/* See syscall_get_arguments() comments */
84static inline void
85syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
86 unsigned int i, unsigned int n, const unsigned long *args)
87{
88 long *aregs = &regs->r0 - i;
89
90 BUG_ON(i > 5 || i + n > 6);
91
92 while (n--)
93 *aregs-- = *args++;
94}
95
96#endif
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index a40d9368c38a..e9a5614cdbb1 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -17,7 +17,7 @@
17/* Thread Align Mask to reach to the top of the stack 17/* Thread Align Mask to reach to the top of the stack
18 * for any process 18 * for any process
19 */ 19 */
20#define ALIGN_PAGE_MASK 0xffffe000 20#define ALIGN_PAGE_MASK 0xffffe000
21 21
22/* 22/*
23 * Size of kernel stack for each process. This must be a power of 2... 23 * Size of kernel stack for each process. This must be a power of 2...
@@ -57,7 +57,7 @@ struct thread_info {
57 .exec_domain = &default_exec_domain, \ 57 .exec_domain = &default_exec_domain, \
58 .flags = 0, \ 58 .flags = 0, \
59 .cpu = 0, \ 59 .cpu = 0, \
60 .preempt_count = INIT_PREEMPT_COUNT, \ 60 .preempt_count = INIT_PREEMPT_COUNT, \
61 .restart_block = { \ 61 .restart_block = { \
62 .fn = do_no_restart_syscall, \ 62 .fn = do_no_restart_syscall, \
63 }, \ 63 }, \
@@ -73,8 +73,7 @@ __attribute_const__
73static inline struct thread_info *current_thread_info(void) 73static inline struct thread_info *current_thread_info(void)
74{ 74{
75 struct thread_info *ti; 75 struct thread_info *ti;
76 __asm__("%0 = sp;" : "=da"(ti) : 76 __asm__("%0 = sp;" : "=da"(ti));
77 );
78 return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1)); 77 return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1));
79} 78}
80 79
@@ -99,21 +98,23 @@ static inline struct thread_info *current_thread_info(void)
99#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 98#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
100#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling 99#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
101 TIF_NEED_RESCHED */ 100 TIF_NEED_RESCHED */
102#define TIF_MEMDIE 4 101#define TIF_MEMDIE 4
103#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 102#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
104#define TIF_FREEZE 6 /* is freezing for suspend */ 103#define TIF_FREEZE 6 /* is freezing for suspend */
105#define TIF_IRQ_SYNC 7 /* sync pipeline stage */ 104#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
106#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 105#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
106#define TIF_SINGLESTEP 9
107 107
108/* as above, but as bit values */ 108/* as above, but as bit values */
109#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 109#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
110#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 110#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
111#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 111#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
112#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
113#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 112#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
114#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 113#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
115#define _TIF_FREEZE (1<<TIF_FREEZE) 114#define _TIF_FREEZE (1<<TIF_FREEZE)
116#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC) 115#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
116#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
117#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
117 118
118#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 119#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
119 120
diff --git a/arch/blackfin/include/asm/time.h b/arch/blackfin/include/asm/time.h
index 589e937ed1eb..9ca7db844d10 100644
--- a/arch/blackfin/include/asm/time.h
+++ b/arch/blackfin/include/asm/time.h
@@ -23,9 +23,7 @@
23 */ 23 */
24 24
25#ifndef CONFIG_CPU_FREQ 25#ifndef CONFIG_CPU_FREQ
26#define TIME_SCALE 1 26# define TIME_SCALE 1
27#define __bfin_cycles_off (0)
28#define __bfin_cycles_mod (0)
29#else 27#else
30/* 28/*
31 * Blackfin CPU frequency scaling supports max Core Clock 1, 1/2 and 1/4 . 29 * Blackfin CPU frequency scaling supports max Core Clock 1, 1/2 and 1/4 .
@@ -33,9 +31,16 @@
33 * adjust the Core Timer Presale Register. This way we don't lose time. 31 * adjust the Core Timer Presale Register. This way we don't lose time.
34 */ 32 */
35#define TIME_SCALE 4 33#define TIME_SCALE 4
34
35# ifdef CONFIG_CYCLES_CLOCKSOURCE
36extern unsigned long long __bfin_cycles_off; 36extern unsigned long long __bfin_cycles_off;
37extern unsigned int __bfin_cycles_mod; 37extern unsigned int __bfin_cycles_mod;
38# endif
39#endif
40
41#if defined(CONFIG_TICKSOURCE_CORETMR)
42extern void bfin_coretmr_init(void);
43extern void bfin_coretmr_clockevent_init(void);
38#endif 44#endif
39 45
40extern void __init setup_core_timer(void);
41#endif 46#endif
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index a8ddbc8ed5af..346a421f1562 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
25obj-$(CONFIG_MODULES) += module.o 25obj-$(CONFIG_MODULES) += module.o
26obj-$(CONFIG_KGDB) += kgdb.o 26obj-$(CONFIG_KGDB) += kgdb.o
27obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o 27obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
28obj-$(CONFIG_NMI_WATCHDOG) += nmi.o
28obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 29obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
29obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o 30obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
30obj-$(CONFIG_STACKTRACE) += stacktrace.o 31obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 924c00286bab..26403d1c9e65 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -91,7 +91,7 @@ late_initcall(proc_dma_init);
91 */ 91 */
92int request_dma(unsigned int channel, const char *device_id) 92int request_dma(unsigned int channel, const char *device_id)
93{ 93{
94 pr_debug("request_dma() : BEGIN \n"); 94 pr_debug("request_dma() : BEGIN\n");
95 95
96 if (device_id == NULL) 96 if (device_id == NULL)
97 printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); 97 printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
@@ -107,7 +107,7 @@ int request_dma(unsigned int channel, const char *device_id)
107#endif 107#endif
108 108
109 if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { 109 if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
110 pr_debug("DMA CHANNEL IN USE \n"); 110 pr_debug("DMA CHANNEL IN USE\n");
111 return -EBUSY; 111 return -EBUSY;
112 } 112 }
113 113
@@ -131,7 +131,7 @@ int request_dma(unsigned int channel, const char *device_id)
131 * you have to request DMA, before doing any operations on 131 * you have to request DMA, before doing any operations on
132 * descriptor/channel 132 * descriptor/channel
133 */ 133 */
134 pr_debug("request_dma() : END \n"); 134 pr_debug("request_dma() : END\n");
135 return 0; 135 return 0;
136} 136}
137EXPORT_SYMBOL(request_dma); 137EXPORT_SYMBOL(request_dma);
@@ -171,7 +171,7 @@ static void clear_dma_buffer(unsigned int channel)
171 171
172void free_dma(unsigned int channel) 172void free_dma(unsigned int channel)
173{ 173{
174 pr_debug("freedma() : BEGIN \n"); 174 pr_debug("freedma() : BEGIN\n");
175 BUG_ON(channel >= MAX_DMA_CHANNELS || 175 BUG_ON(channel >= MAX_DMA_CHANNELS ||
176 !atomic_read(&dma_ch[channel].chan_status)); 176 !atomic_read(&dma_ch[channel].chan_status));
177 177
@@ -185,7 +185,7 @@ void free_dma(unsigned int channel)
185 /* Clear the DMA Variable in the Channel */ 185 /* Clear the DMA Variable in the Channel */
186 atomic_set(&dma_ch[channel].chan_status, 0); 186 atomic_set(&dma_ch[channel].chan_status, 0);
187 187
188 pr_debug("freedma() : END \n"); 188 pr_debug("freedma() : END\n");
189} 189}
190EXPORT_SYMBOL(free_dma); 190EXPORT_SYMBOL(free_dma);
191 191
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index a174596cc009..e35e20f00d9b 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -1289,44 +1289,50 @@ __initcall(gpio_register_proc);
1289#endif 1289#endif
1290 1290
1291#ifdef CONFIG_GPIOLIB 1291#ifdef CONFIG_GPIOLIB
1292int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) 1292static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
1293{ 1293{
1294 return bfin_gpio_direction_input(gpio); 1294 return bfin_gpio_direction_input(gpio);
1295} 1295}
1296 1296
1297int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) 1297static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
1298{ 1298{
1299 return bfin_gpio_direction_output(gpio, level); 1299 return bfin_gpio_direction_output(gpio, level);
1300} 1300}
1301 1301
1302int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) 1302static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
1303{ 1303{
1304 return bfin_gpio_get_value(gpio); 1304 return bfin_gpio_get_value(gpio);
1305} 1305}
1306 1306
1307void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) 1307static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
1308{ 1308{
1309 return bfin_gpio_set_value(gpio, value); 1309 return bfin_gpio_set_value(gpio, value);
1310} 1310}
1311 1311
1312int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) 1312static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
1313{ 1313{
1314 return bfin_gpio_request(gpio, chip->label); 1314 return bfin_gpio_request(gpio, chip->label);
1315} 1315}
1316 1316
1317void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) 1317static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
1318{ 1318{
1319 return bfin_gpio_free(gpio); 1319 return bfin_gpio_free(gpio);
1320} 1320}
1321 1321
1322static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
1323{
1324 return gpio + GPIO_IRQ_BASE;
1325}
1326
1322static struct gpio_chip bfin_chip = { 1327static struct gpio_chip bfin_chip = {
1323 .label = "Blackfin-GPIOlib", 1328 .label = "BFIN-GPIO",
1324 .direction_input = bfin_gpiolib_direction_input, 1329 .direction_input = bfin_gpiolib_direction_input,
1325 .get = bfin_gpiolib_get_value, 1330 .get = bfin_gpiolib_get_value,
1326 .direction_output = bfin_gpiolib_direction_output, 1331 .direction_output = bfin_gpiolib_direction_output,
1327 .set = bfin_gpiolib_set_value, 1332 .set = bfin_gpiolib_set_value,
1328 .request = bfin_gpiolib_gpio_request, 1333 .request = bfin_gpiolib_gpio_request,
1329 .free = bfin_gpiolib_gpio_free, 1334 .free = bfin_gpiolib_gpio_free,
1335 .to_irq = bfin_gpiolib_gpio_to_irq,
1330 .base = 0, 1336 .base = 0,
1331 .ngpio = MAX_BLACKFIN_GPIOS, 1337 .ngpio = MAX_BLACKFIN_GPIOS,
1332}; 1338};
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 8d42b9e50dfa..30fd6417f069 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
64 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); 64 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
65 } 65 }
66 66
67#ifdef CONFIG_ROMKERNEL
68 /* Cover kernel XIP flash area */
69 addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
70 dcplb_tbl[cpu][i_d].addr = addr;
71 dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
72 icplb_tbl[cpu][i_i].addr = addr;
73 icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
74#endif
75
67 /* Cover L1 memory. One 4M area for code and data each is enough. */ 76 /* Cover L1 memory. One 4M area for code and data each is enough. */
68#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 77#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
69 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); 78 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 930c01c06813..87b25b1b30ed 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
31int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS]; 31int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
32int nr_cplb_flush[NR_CPUS]; 32int nr_cplb_flush[NR_CPUS];
33 33
34#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
35#define MGR_ATTR __attribute__((l1_text))
36#else
37#define MGR_ATTR
38#endif
39
34/* 40/*
35 * Given the contents of the status register, return the index of the 41 * Given the contents of the status register, return the index of the
36 * CPLB that caused the fault. 42 * CPLB that caused the fault.
@@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
59/* 65/*
60 * Find an ICPLB entry to be evicted and return its index. 66 * Find an ICPLB entry to be evicted and return its index.
61 */ 67 */
62static int evict_one_icplb(unsigned int cpu) 68MGR_ATTR static int evict_one_icplb(unsigned int cpu)
63{ 69{
64 int i; 70 int i;
65 for (i = first_switched_icplb; i < MAX_CPLBS; i++) 71 for (i = first_switched_icplb; i < MAX_CPLBS; i++)
@@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu)
74 return i; 80 return i;
75} 81}
76 82
77static int evict_one_dcplb(unsigned int cpu) 83MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
78{ 84{
79 int i; 85 int i;
80 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) 86 for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
@@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu)
89 return i; 95 return i;
90} 96}
91 97
92static noinline int dcplb_miss(unsigned int cpu) 98MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
93{ 99{
94 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); 100 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
95 int status = bfin_read_DCPLB_STATUS(); 101 int status = bfin_read_DCPLB_STATUS();
@@ -114,10 +120,15 @@ static noinline int dcplb_miss(unsigned int cpu)
114 d_data = L2_DMEMORY; 120 d_data = L2_DMEMORY;
115 } else if (addr >= physical_mem_end) { 121 } else if (addr >= physical_mem_end) {
116 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { 122 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
117 addr &= ~(4 * 1024 * 1024 - 1); 123 mask = current_rwx_mask[cpu];
118 d_data &= ~PAGE_SIZE_4KB; 124 if (mask) {
119 d_data |= PAGE_SIZE_4MB; 125 int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
120 d_data |= CPLB_USER_RD | CPLB_USER_WR; 126 int idx = page >> 5;
127 int bit = 1 << (page & 31);
128
129 if (mask[idx] & bit)
130 d_data |= CPLB_USER_RD;
131 }
121 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH 132 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
122 && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { 133 && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
123 addr &= ~(1 * 1024 * 1024 - 1); 134 addr &= ~(1 * 1024 * 1024 - 1);
@@ -126,7 +137,9 @@ static noinline int dcplb_miss(unsigned int cpu)
126 } else 137 } else
127 return CPLB_PROT_VIOL; 138 return CPLB_PROT_VIOL;
128 } else if (addr >= _ramend) { 139 } else if (addr >= _ramend) {
129 d_data |= CPLB_USER_RD | CPLB_USER_WR; 140 d_data |= CPLB_USER_RD | CPLB_USER_WR;
141 if (reserved_mem_dcache_on)
142 d_data |= CPLB_L1_CHBL;
130 } else { 143 } else {
131 mask = current_rwx_mask[cpu]; 144 mask = current_rwx_mask[cpu];
132 if (mask) { 145 if (mask) {
@@ -156,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu)
156 return 0; 169 return 0;
157} 170}
158 171
159static noinline int icplb_miss(unsigned int cpu) 172MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
160{ 173{
161 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); 174 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
162 int status = bfin_read_ICPLB_STATUS(); 175 int status = bfin_read_ICPLB_STATUS();
@@ -204,10 +217,19 @@ static noinline int icplb_miss(unsigned int cpu)
204 i_data = L2_IMEMORY; 217 i_data = L2_IMEMORY;
205 } else if (addr >= physical_mem_end) { 218 } else if (addr >= physical_mem_end) {
206 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { 219 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
207 addr &= ~(4 * 1024 * 1024 - 1); 220 if (!(status & FAULT_USERSUPV)) {
208 i_data &= ~PAGE_SIZE_4KB; 221 unsigned long *mask = current_rwx_mask[cpu];
209 i_data |= PAGE_SIZE_4MB; 222
210 i_data |= CPLB_USER_RD; 223 if (mask) {
224 int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
225 int idx = page >> 5;
226 int bit = 1 << (page & 31);
227
228 mask += 2 * page_mask_nelts;
229 if (mask[idx] & bit)
230 i_data |= CPLB_USER_RD;
231 }
232 }
211 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH 233 } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
212 && (status & FAULT_USERSUPV)) { 234 && (status & FAULT_USERSUPV)) {
213 addr &= ~(1 * 1024 * 1024 - 1); 235 addr &= ~(1 * 1024 * 1024 - 1);
@@ -217,6 +239,8 @@ static noinline int icplb_miss(unsigned int cpu)
217 return CPLB_PROT_VIOL; 239 return CPLB_PROT_VIOL;
218 } else if (addr >= _ramend) { 240 } else if (addr >= _ramend) {
219 i_data |= CPLB_USER_RD; 241 i_data |= CPLB_USER_RD;
242 if (reserved_mem_icache_on)
243 i_data |= CPLB_L1_CHBL;
220 } else { 244 } else {
221 /* 245 /*
222 * Two cases to distinguish - a supervisor access must 246 * Two cases to distinguish - a supervisor access must
@@ -251,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu)
251 return 0; 275 return 0;
252} 276}
253 277
254static noinline int dcplb_protection_fault(unsigned int cpu) 278MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
255{ 279{
256 int status = bfin_read_DCPLB_STATUS(); 280 int status = bfin_read_DCPLB_STATUS();
257 281
@@ -271,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu)
271 return CPLB_PROT_VIOL; 295 return CPLB_PROT_VIOL;
272} 296}
273 297
274int cplb_hdr(int seqstat, struct pt_regs *regs) 298MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
275{ 299{
276 int cause = seqstat & 0x3f; 300 int cause = seqstat & 0x3f;
277 unsigned int cpu = raw_smp_processor_id(); 301 unsigned int cpu = raw_smp_processor_id();
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 282a7919821b..bfe75af4e8bd 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
56 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; 56 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
57 } 57 }
58 58
59#ifdef CONFIG_ROMKERNEL
60 /* Cover kernel XIP flash area */
61 addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
62 d_tbl[i_d].addr = addr;
63 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
64 i_tbl[i_i].addr = addr;
65 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
66#endif
67
59 /* Cover L1 memory. One 4M area for code and data each is enough. */ 68 /* Cover L1 memory. One 4M area for code and data each is enough. */
60 if (cpu == 0) { 69 if (cpu == 0) {
61 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { 70 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e937f323d82c..04ddcfeb7981 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent);
116void __dma_sync(dma_addr_t addr, size_t size, 116void __dma_sync(dma_addr_t addr, size_t size,
117 enum dma_data_direction dir) 117 enum dma_data_direction dir)
118{ 118{
119 _dma_sync(addr, size, dir); 119 __dma_sync_inline(addr, size, dir);
120} 120}
121EXPORT_SYMBOL(__dma_sync); 121EXPORT_SYMBOL(__dma_sync);
122 122
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index f27dc2292e1b..686478f5f66b 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -44,7 +44,7 @@ ENTRY(_ret_from_fork)
44 sti r4; 44 sti r4;
45#endif /* CONFIG_IPIPE */ 45#endif /* CONFIG_IPIPE */
46 SP += -12; 46 SP += -12;
47 call _schedule_tail; 47 pseudo_long_call _schedule_tail, p5;
48 SP += 12; 48 SP += 12;
49 r0 = [sp + PT_IPEND]; 49 r0 = [sp + PT_IPEND];
50 cc = bittst(r0,1); 50 cc = bittst(r0,1);
@@ -79,7 +79,7 @@ ENTRY(_sys_vfork)
79 r0 += 24; 79 r0 += 24;
80 [--sp] = rets; 80 [--sp] = rets;
81 SP += -12; 81 SP += -12;
82 call _bfin_vfork; 82 pseudo_long_call _bfin_vfork, p2;
83 SP += 12; 83 SP += 12;
84 rets = [sp++]; 84 rets = [sp++];
85 rts; 85 rts;
@@ -90,7 +90,7 @@ ENTRY(_sys_clone)
90 r0 += 24; 90 r0 += 24;
91 [--sp] = rets; 91 [--sp] = rets;
92 SP += -12; 92 SP += -12;
93 call _bfin_clone; 93 pseudo_long_call _bfin_clone, p2;
94 SP += 12; 94 SP += 12;
95 rets = [sp++]; 95 rets = [sp++];
96 rts; 96 rts;
@@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn)
101 r0 += 24; 101 r0 += 24;
102 [--sp] = rets; 102 [--sp] = rets;
103 SP += -12; 103 SP += -12;
104 call _do_rt_sigreturn; 104 pseudo_long_call _do_rt_sigreturn, p2;
105 SP += 12; 105 SP += 12;
106 rets = [sp++]; 106 rets = [sp++];
107 rts; 107 rts;
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 76dd4fbcd17a..d66446b572c0 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * mcount and friends -- ftrace stuff 2 * mcount and friends -- ftrace stuff
3 * 3 *
4 * Copyright (C) 2009 Analog Devices Inc. 4 * Copyright (C) 2009-2010 Analog Devices Inc.
5 * Licensed under the GPL-2 or later. 5 * Licensed under the GPL-2 or later.
6 */ 6 */
7 7
@@ -21,6 +21,15 @@
21 * function will be waiting there. mmmm pie. 21 * function will be waiting there. mmmm pie.
22 */ 22 */
23ENTRY(__mcount) 23ENTRY(__mcount)
24#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
25 /* optional micro optimization: return if stopped */
26 p1.l = _function_trace_stop;
27 p1.h = _function_trace_stop;
28 r3 = [p1];
29 cc = r3 == 0;
30 if ! cc jump _ftrace_stub (bp);
31#endif
32
24 /* save third function arg early so we can do testing below */ 33 /* save third function arg early so we can do testing below */
25 [--sp] = r2; 34 [--sp] = r2;
26 35
@@ -106,9 +115,12 @@ ENTRY(_ftrace_graph_caller)
106 [--sp] = r1; 115 [--sp] = r1;
107 [--sp] = rets; 116 [--sp] = rets;
108 117
109 /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */ 118 /* prepare_ftrace_return(parent, self_addr, frame_pointer) */
110 r0 = sp; 119 r0 = sp; /* unsigned long *parent */
111 r1 = rets; 120 r1 = rets; /* unsigned long self_addr */
121#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
122 r2 = fp; /* unsigned long frame_pointer */
123#endif
112 r0 += 16; /* skip the 4 local regs on stack */ 124 r0 += 16; /* skip the 4 local regs on stack */
113 r1 += -MCOUNT_INSN_SIZE; 125 r1 += -MCOUNT_INSN_SIZE;
114 call _prepare_ftrace_return; 126 call _prepare_ftrace_return;
@@ -127,6 +139,9 @@ ENTRY(_return_to_handler)
127 [--sp] = r1; 139 [--sp] = r1;
128 140
129 /* get original return address */ 141 /* get original return address */
142#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
143 r0 = fp; /* Blackfin is sane, so omit this */
144#endif
130 call _ftrace_return_to_handler; 145 call _ftrace_return_to_handler;
131 rets = r0; 146 rets = r0;
132 147
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index f2c85ac6f2da..a61d948ea925 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -16,7 +16,8 @@
16 * Hook the return address and push it in the stack of return addrs 16 * Hook the return address and push it in the stack of return addrs
17 * in current thread info. 17 * in current thread info.
18 */ 18 */
19void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 19void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
20 unsigned long frame_pointer)
20{ 21{
21 struct ftrace_graph_ent trace; 22 struct ftrace_graph_ent trace;
22 unsigned long return_hooker = (unsigned long)&return_to_handler; 23 unsigned long return_hooker = (unsigned long)&return_to_handler;
@@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
24 if (unlikely(atomic_read(&current->tracing_graph_pause))) 25 if (unlikely(atomic_read(&current->tracing_graph_pause)))
25 return; 26 return;
26 27
27 if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY) 28 if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
29 frame_pointer) == -EBUSY)
28 return; 30 return;
29 31
30 trace.func = self_addr; 32 trace.func = self_addr;
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 118c5b9dedac..d3970e8acd1a 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task);
28 * "init_task" linker map entry. 28 * "init_task" linker map entry.
29 */ 29 */
30union thread_union init_thread_union 30union thread_union init_thread_union
31 __attribute__ ((__section__(".init_task.data"))) = { 31 __init_task_data = {
32INIT_THREAD_INFO(init_task)}; 32INIT_THREAD_INFO(init_task)};
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 34c7c3ed2c9c..2c501ceb1e55 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -145,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
145#endif 145#endif
146} 146}
147 147
148struct hw_breakpoint { 148static struct hw_breakpoint {
149 unsigned int occupied:1; 149 unsigned int occupied:1;
150 unsigned int skip:1; 150 unsigned int skip:1;
151 unsigned int enabled:1; 151 unsigned int enabled:1;
@@ -155,7 +155,7 @@ struct hw_breakpoint {
155 unsigned int addr; 155 unsigned int addr;
156} breakinfo[HW_WATCHPOINT_NUM]; 156} breakinfo[HW_WATCHPOINT_NUM];
157 157
158int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) 158static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
159{ 159{
160 int breakno; 160 int breakno;
161 int bfin_type; 161 int bfin_type;
@@ -202,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
202 return -ENOSPC; 202 return -ENOSPC;
203} 203}
204 204
205int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) 205static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
206{ 206{
207 int breakno; 207 int breakno;
208 int bfin_type; 208 int bfin_type;
@@ -230,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
230 return 0; 230 return 0;
231} 231}
232 232
233void bfin_remove_all_hw_break(void) 233static void bfin_remove_all_hw_break(void)
234{ 234{
235 int breakno; 235 int breakno;
236 236
@@ -242,7 +242,7 @@ void bfin_remove_all_hw_break(void)
242 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; 242 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
243} 243}
244 244
245void bfin_correct_hw_break(void) 245static void bfin_correct_hw_break(void)
246{ 246{
247 int breakno; 247 int breakno;
248 unsigned int wpiactl = 0; 248 unsigned int wpiactl = 0;
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
new file mode 100644
index 000000000000..0b5f72f17fd0
--- /dev/null
+++ b/arch/blackfin/kernel/nmi.c
@@ -0,0 +1,299 @@
1/*
2 * Blackfin nmi_watchdog Driver
3 *
4 * Originally based on bfin_wdt.c
5 * Copyright 2010-2010 Analog Devices Inc.
6 * Graff Yang <graf.yang@analog.com>
7 *
8 * Enter bugs at http://blackfin.uclinux.org/
9 *
10 * Licensed under the GPL-2 or later.
11 */
12
13#include <linux/bitops.h>
14#include <linux/hardirq.h>
15#include <linux/sysdev.h>
16#include <linux/pm.h>
17#include <linux/nmi.h>
18#include <linux/smp.h>
19#include <linux/timer.h>
20#include <asm/blackfin.h>
21#include <asm/atomic.h>
22#include <asm/cacheflush.h>
23#include <asm/bfin_watchdog.h>
24
25#define DRV_NAME "nmi-wdt"
26
27#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
28#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
29static int nmi_wdt_cpu = 1;
30
31static unsigned int timeout = NMI_WDT_TIMEOUT;
32static int nmi_active;
33
34static unsigned short wdoga_ctl;
35static unsigned int wdoga_cnt;
36static struct corelock_slot saved_corelock;
37static atomic_t nmi_touched[NR_CPUS];
38static struct timer_list ntimer;
39
40enum {
41 COREA_ENTER_NMI = 0,
42 COREA_EXIT_NMI,
43 COREB_EXIT_NMI,
44
45 NMI_EVENT_NR,
46};
47static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
48
49/* we are in nmi, non-atomic bit ops is safe */
50static inline void set_nmi_event(int event)
51{
52 __set_bit(event, &nmi_event);
53}
54
55static inline void wait_nmi_event(int event)
56{
57 while (!test_bit(event, &nmi_event))
58 barrier();
59 __clear_bit(event, &nmi_event);
60}
61
62static inline void send_corea_nmi(void)
63{
64 wdoga_ctl = bfin_read_WDOGA_CTL();
65 wdoga_cnt = bfin_read_WDOGA_CNT();
66
67 bfin_write_WDOGA_CTL(WDEN_DISABLE);
68 bfin_write_WDOGA_CNT(0);
69 bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
70}
71
72static inline void restore_corea_nmi(void)
73{
74 bfin_write_WDOGA_CTL(WDEN_DISABLE);
75 bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
76
77 bfin_write_WDOGA_CNT(wdoga_cnt);
78 bfin_write_WDOGA_CTL(wdoga_ctl);
79}
80
81static inline void save_corelock(void)
82{
83 saved_corelock = corelock;
84 corelock.lock = 0;
85}
86
87static inline void restore_corelock(void)
88{
89 corelock = saved_corelock;
90}
91
92
93static inline void nmi_wdt_keepalive(void)
94{
95 bfin_write_WDOGB_STAT(0);
96}
97
98static inline void nmi_wdt_stop(void)
99{
100 bfin_write_WDOGB_CTL(WDEN_DISABLE);
101}
102
103/* before calling this function, you must stop the WDT */
104static inline void nmi_wdt_clear(void)
105{
106 /* clear TRO bit, disable event generation */
107 bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
108}
109
110static inline void nmi_wdt_start(void)
111{
112 bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
113}
114
115static inline int nmi_wdt_running(void)
116{
117 return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
118}
119
120static inline int nmi_wdt_set_timeout(unsigned long t)
121{
122 u32 cnt, max_t, sclk;
123 int run;
124
125 sclk = get_sclk();
126 max_t = -1 / sclk;
127 cnt = t * sclk;
128 if (t > max_t) {
129 pr_warning("NMI: timeout value is too large\n");
130 return -EINVAL;
131 }
132
133 run = nmi_wdt_running();
134 nmi_wdt_stop();
135 bfin_write_WDOGB_CNT(cnt);
136 if (run)
137 nmi_wdt_start();
138
139 timeout = t;
140
141 return 0;
142}
143
144int check_nmi_wdt_touched(void)
145{
146 unsigned int this_cpu = smp_processor_id();
147 unsigned int cpu;
148
149 cpumask_t mask = cpu_online_map;
150
151 if (!atomic_read(&nmi_touched[this_cpu]))
152 return 0;
153
154 atomic_set(&nmi_touched[this_cpu], 0);
155
156 cpu_clear(this_cpu, mask);
157 for_each_cpu_mask(cpu, mask) {
158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
159 (unsigned long)(&nmi_touched[cpu]));
160 if (!atomic_read(&nmi_touched[cpu]))
161 return 0;
162 atomic_set(&nmi_touched[cpu], 0);
163 }
164
165 return 1;
166}
167
168static void nmi_wdt_timer(unsigned long data)
169{
170 if (check_nmi_wdt_touched())
171 nmi_wdt_keepalive();
172
173 mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
174}
175
176static int __init init_nmi_wdt(void)
177{
178 nmi_wdt_set_timeout(timeout);
179 nmi_wdt_start();
180 nmi_active = true;
181
182 init_timer(&ntimer);
183 ntimer.function = nmi_wdt_timer;
184 ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
185 add_timer(&ntimer);
186
187 pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
188 return 0;
189}
190device_initcall(init_nmi_wdt);
191
192void touch_nmi_watchdog(void)
193{
194 atomic_set(&nmi_touched[smp_processor_id()], 1);
195}
196
197/* Suspend/resume support */
198#ifdef CONFIG_PM
199static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state)
200{
201 nmi_wdt_stop();
202 return 0;
203}
204
205static int nmi_wdt_resume(struct sys_device *dev)
206{
207 if (nmi_active)
208 nmi_wdt_start();
209 return 0;
210}
211
212static struct sysdev_class nmi_sysclass = {
213 .name = DRV_NAME,
214 .resume = nmi_wdt_resume,
215 .suspend = nmi_wdt_suspend,
216};
217
218static struct sys_device device_nmi_wdt = {
219 .id = 0,
220 .cls = &nmi_sysclass,
221};
222
223static int __init init_nmi_wdt_sysfs(void)
224{
225 int error;
226
227 if (!nmi_active)
228 return 0;
229
230 error = sysdev_class_register(&nmi_sysclass);
231 if (!error)
232 error = sysdev_register(&device_nmi_wdt);
233 return error;
234}
235late_initcall(init_nmi_wdt_sysfs);
236
237#endif /* CONFIG_PM */
238
239
240asmlinkage notrace void do_nmi(struct pt_regs *fp)
241{
242 unsigned int cpu = smp_processor_id();
243 nmi_enter();
244
245 cpu_pda[cpu].__nmi_count += 1;
246
247 if (cpu == nmi_wdt_cpu) {
248 /* CoreB goes here first */
249
250 /* reload the WDOG_STAT */
251 nmi_wdt_keepalive();
252
253 /* clear nmi interrupt for CoreB */
254 nmi_wdt_stop();
255 nmi_wdt_clear();
256
257 /* trigger NMI interrupt of CoreA */
258 send_corea_nmi();
259
260 /* waiting CoreB to enter NMI */
261 wait_nmi_event(COREA_ENTER_NMI);
262
263 /* recover WDOGA's settings */
264 restore_corea_nmi();
265
266 save_corelock();
267
268 /* corelock is save/cleared, CoreA is dummping messages */
269
270 wait_nmi_event(COREA_EXIT_NMI);
271 } else {
272 /* OK, CoreA entered NMI */
273 set_nmi_event(COREA_ENTER_NMI);
274 }
275
276 pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
277 dump_bfin_process(fp);
278 dump_bfin_mem(fp);
279 show_regs(fp);
280 dump_bfin_trace_buffer();
281 show_stack(current, (unsigned long *)fp);
282
283 if (cpu == nmi_wdt_cpu) {
284 pr_emerg("This fault is not recoverable, sorry!\n");
285
286 /* CoreA dump finished, restore the corelock */
287 restore_corelock();
288
289 set_nmi_event(COREB_EXIT_NMI);
290 } else {
291 /* CoreB dump finished, notice the CoreA we are done */
292 set_nmi_event(COREA_EXIT_NMI);
293
294 /* synchronize with CoreA */
295 wait_nmi_event(COREB_EXIT_NMI);
296 }
297
298 nmi_exit();
299}
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index b56b0e485e0b..29705cec91de 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -98,13 +98,6 @@ void cpu_idle(void)
98 } 98 }
99} 99}
100 100
101/* Fill in the fpu structure for a core dump. */
102
103int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
104{
105 return 1;
106}
107
108/* 101/*
109 * This gets run with P1 containing the 102 * This gets run with P1 containing the
110 * function to call, and R1 containing 103 * function to call, and R1 containing
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 65567dc4b9f5..43eb969405d1 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 2 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
3 * these modifications are Copyright 2004-2009 Analog Devices Inc. 3 * these modifications are Copyright 2004-2010 Analog Devices Inc.
4 * 4 *
5 * Licensed under the GPL-2 5 * Licensed under the GPL-2
6 */ 6 */
@@ -9,10 +9,13 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/elf.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/ptrace.h> 14#include <linux/ptrace.h>
14#include <linux/user.h> 15#include <linux/user.h>
16#include <linux/regset.h>
15#include <linux/signal.h> 17#include <linux/signal.h>
18#include <linux/tracehook.h>
16#include <linux/uaccess.h> 19#include <linux/uaccess.h>
17 20
18#include <asm/page.h> 21#include <asm/page.h>
@@ -25,90 +28,57 @@
25#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
26#include <asm/mem_map.h> 29#include <asm/mem_map.h>
27 30
28#define TEXT_OFFSET 0
29/* 31/*
30 * does not yet catch signals sent when the child dies. 32 * does not yet catch signals sent when the child dies.
31 * in exit.c or in signal.c. 33 * in exit.c or in signal.c.
32 */ 34 */
33 35
34/* determines which bits in the SYSCFG reg the user has access to. */
35/* 1 = access 0 = no access */
36#define SYSCFG_MASK 0x0007 /* SYSCFG reg */
37/* sets the trace bits. */
38#define TRACE_BITS 0x0001
39
40/* Find the stack offset for a register, relative to thread.esp0. */
41#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
42
43/*
44 * Get the address of the live pt_regs for the specified task.
45 * These are saved onto the top kernel stack when the process
46 * is not running.
47 *
48 * Note: if a user thread is execve'd from kernel space, the
49 * kernel stack will not be empty on entry to the kernel, so
50 * ptracing these tasks will fail.
51 */
52static inline struct pt_regs *get_user_regs(struct task_struct *task)
53{
54 return (struct pt_regs *)
55 ((unsigned long)task_stack_page(task) +
56 (THREAD_SIZE - sizeof(struct pt_regs)));
57}
58
59/*
60 * Get all user integer registers.
61 */
62static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
63{
64 struct pt_regs regs;
65 memcpy(&regs, get_user_regs(tsk), sizeof(regs));
66 regs.usp = tsk->thread.usp;
67 return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
68}
69
70/* Mapping from PT_xxx to the stack offset at which the register is
71 * saved. Notice that usp has no stack-slot and needs to be treated
72 * specially (see get_reg/put_reg below).
73 */
74
75/* 36/*
76 * Get contents of register REGNO in task TASK. 37 * Get contents of register REGNO in task TASK.
77 */ 38 */
78static inline long get_reg(struct task_struct *task, int regno) 39static inline long
40get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
79{ 41{
80 unsigned char *reg_ptr; 42 long tmp;
43 struct pt_regs *regs = task_pt_regs(task);
81 44
82 struct pt_regs *regs = 45 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
83 (struct pt_regs *)((unsigned long)task_stack_page(task) + 46 return -EIO;
84 (THREAD_SIZE - sizeof(struct pt_regs)));
85 reg_ptr = (char *)regs;
86 47
87 switch (regno) { 48 switch (regno) {
49 case PT_TEXT_ADDR:
50 tmp = task->mm->start_code;
51 break;
52 case PT_TEXT_END_ADDR:
53 tmp = task->mm->end_code;
54 break;
55 case PT_DATA_ADDR:
56 tmp = task->mm->start_data;
57 break;
88 case PT_USP: 58 case PT_USP:
89 return task->thread.usp; 59 tmp = task->thread.usp;
60 break;
90 default: 61 default:
91 if (regno <= 216) 62 if (regno < sizeof(*regs)) {
92 return *(long *)(reg_ptr + regno); 63 void *reg_ptr = regs;
64 tmp = *(long *)(reg_ptr + regno);
65 } else
66 return -EIO;
93 } 67 }
94 /* slight mystery ... never seems to come here but kernel misbehaves without this code! */
95 68
96 printk(KERN_WARNING "Request to get for unknown register %d\n", regno); 69 return put_user(tmp, datap);
97 return 0;
98} 70}
99 71
100/* 72/*
101 * Write contents of register REGNO in task TASK. 73 * Write contents of register REGNO in task TASK.
102 */ 74 */
103static inline int 75static inline int
104put_reg(struct task_struct *task, int regno, unsigned long data) 76put_reg(struct task_struct *task, long regno, unsigned long data)
105{ 77{
106 char *reg_ptr; 78 struct pt_regs *regs = task_pt_regs(task);
107 79
108 struct pt_regs *regs = 80 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
109 (struct pt_regs *)((unsigned long)task_stack_page(task) + 81 return -EIO;
110 (THREAD_SIZE - sizeof(struct pt_regs)));
111 reg_ptr = (char *)regs;
112 82
113 switch (regno) { 83 switch (regno) {
114 case PT_PC: 84 case PT_PC:
@@ -125,10 +95,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
125 regs->usp = data; 95 regs->usp = data;
126 task->thread.usp = data; 96 task->thread.usp = data;
127 break; 97 break;
98 case PT_SYSCFG: /* don't let userspace screw with this */
99 if ((data & ~1) != 0x6)
100 pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
101 break; /* regs->syscfg = data; break; */
128 default: 102 default:
129 if (regno <= 216) 103 if (regno < sizeof(*regs)) {
130 *(long *)(reg_ptr + regno) = data; 104 void *reg_offset = regs;
105 *(long *)(reg_offset + regno) = data;
106 }
107 /* Ignore writes to pseudo registers */
131 } 108 }
109
132 return 0; 110 return 0;
133} 111}
134 112
@@ -160,24 +138,98 @@ static inline int is_user_addr_valid(struct task_struct *child,
160 return -EIO; 138 return -EIO;
161} 139}
162 140
163void ptrace_enable(struct task_struct *child) 141/*
142 * retrieve the contents of Blackfin userspace general registers
143 */
144static int genregs_get(struct task_struct *target,
145 const struct user_regset *regset,
146 unsigned int pos, unsigned int count,
147 void *kbuf, void __user *ubuf)
164{ 148{
165 unsigned long tmp; 149 struct pt_regs *regs = task_pt_regs(target);
166 tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); 150 int ret;
167 put_reg(child, PT_SYSCFG, tmp); 151
152 /* This sucks ... */
153 regs->usp = target->thread.usp;
154
155 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
156 regs, 0, sizeof(*regs));
157 if (ret < 0)
158 return ret;
159
160 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
161 sizeof(*regs), -1);
168} 162}
169 163
170/* 164/*
171 * Called by kernel/ptrace.c when detaching.. 165 * update the contents of the Blackfin userspace general registers
172 * 166 */
173 * Make sure the single step bit is not set. 167static int genregs_set(struct task_struct *target,
168 const struct user_regset *regset,
169 unsigned int pos, unsigned int count,
170 const void *kbuf, const void __user *ubuf)
171{
172 struct pt_regs *regs = task_pt_regs(target);
173 int ret;
174
175 /* Don't let people set SYSCFG (it's at the end of pt_regs) */
176 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
177 regs, 0, PT_SYSCFG);
178 if (ret < 0)
179 return ret;
180
181 /* This sucks ... */
182 target->thread.usp = regs->usp;
183 /* regs->retx = regs->pc; */
184
185 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
186 PT_SYSCFG, -1);
187}
188
189/*
190 * Define the register sets available on the Blackfin under Linux
174 */ 191 */
175void ptrace_disable(struct task_struct *child) 192enum bfin_regset {
193 REGSET_GENERAL,
194};
195
196static const struct user_regset bfin_regsets[] = {
197 [REGSET_GENERAL] = {
198 .core_note_type = NT_PRSTATUS,
199 .n = sizeof(struct pt_regs) / sizeof(long),
200 .size = sizeof(long),
201 .align = sizeof(long),
202 .get = genregs_get,
203 .set = genregs_set,
204 },
205};
206
207static const struct user_regset_view user_bfin_native_view = {
208 .name = "Blackfin",
209 .e_machine = EM_BLACKFIN,
210 .regsets = bfin_regsets,
211 .n = ARRAY_SIZE(bfin_regsets),
212};
213
214const struct user_regset_view *task_user_regset_view(struct task_struct *task)
215{
216 return &user_bfin_native_view;
217}
218
219void user_enable_single_step(struct task_struct *child)
220{
221 struct pt_regs *regs = task_pt_regs(child);
222 regs->syscfg |= SYSCFG_SSSTEP;
223
224 set_tsk_thread_flag(child, TIF_SINGLESTEP);
225}
226
227void user_disable_single_step(struct task_struct *child)
176{ 228{
177 unsigned long tmp; 229 struct pt_regs *regs = task_pt_regs(child);
178 /* make sure the single step bit is not set. */ 230 regs->syscfg &= ~SYSCFG_SSSTEP;
179 tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; 231
180 put_reg(child, PT_SYSCFG, tmp); 232 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
181} 233}
182 234
183long arch_ptrace(struct task_struct *child, long request, long addr, long data) 235long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -240,40 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
240 break; 292 break;
241 } 293 }
242 294
243 /* read the word at location addr in the USER area. */
244 case PTRACE_PEEKUSR:
245 {
246 unsigned long tmp;
247 ret = -EIO;
248 tmp = 0;
249 if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
250 printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning "
251 "0 - %x sizeof(pt_regs) is %lx\n",
252 (int)addr, sizeof(struct pt_regs));
253 break;
254 }
255 if (addr == sizeof(struct pt_regs)) {
256 /* PT_TEXT_ADDR */
257 tmp = child->mm->start_code + TEXT_OFFSET;
258 } else if (addr == (sizeof(struct pt_regs) + 4)) {
259 /* PT_TEXT_END_ADDR */
260 tmp = child->mm->end_code;
261 } else if (addr == (sizeof(struct pt_regs) + 8)) {
262 /* PT_DATA_ADDR */
263 tmp = child->mm->start_data;
264#ifdef CONFIG_BINFMT_ELF_FDPIC
265 } else if (addr == (sizeof(struct pt_regs) + 12)) {
266 goto case_PTRACE_GETFDPIC_EXEC;
267 } else if (addr == (sizeof(struct pt_regs) + 16)) {
268 goto case_PTRACE_GETFDPIC_INTERP;
269#endif
270 } else {
271 tmp = get_reg(child, addr);
272 }
273 ret = put_user(tmp, datap);
274 break;
275 }
276
277#ifdef CONFIG_BINFMT_ELF_FDPIC 295#ifdef CONFIG_BINFMT_ELF_FDPIC
278 case PTRACE_GETFDPIC: { 296 case PTRACE_GETFDPIC: {
279 unsigned long tmp = 0; 297 unsigned long tmp = 0;
@@ -336,78 +354,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
336 break; 354 break;
337 } 355 }
338 356
339 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 357 case PTRACE_PEEKUSR:
340 ret = -EIO; 358 switch (addr) {
341 if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { 359#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
342 printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); 360 case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC;
343 break; 361 case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
344 } 362#endif
345 363 default:
346 if (addr >= (sizeof(struct pt_regs))) { 364 ret = get_reg(child, addr, datap);
347 ret = 0;
348 break;
349 }
350 if (addr == PT_SYSCFG) {
351 data &= SYSCFG_MASK;
352 data |= get_reg(child, PT_SYSCFG);
353 } 365 }
354 ret = put_reg(child, addr, data); 366 pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
355 break; 367 break;
356 368
357 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ 369 case PTRACE_POKEUSR:
358 case PTRACE_CONT: /* restart after signal. */ 370 ret = put_reg(child, addr, data);
359 pr_debug("ptrace: syscall/cont\n"); 371 pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
360
361 ret = -EIO;
362 if (!valid_signal(data))
363 break;
364 if (request == PTRACE_SYSCALL)
365 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
366 else
367 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
368 child->exit_code = data;
369 ptrace_disable(child);
370 pr_debug("ptrace: before wake_up_process\n");
371 wake_up_process(child);
372 ret = 0;
373 break;
374
375 /*
376 * make the child exit. Best I can do is send it a sigkill.
377 * perhaps it should be put in the status that it wants to
378 * exit.
379 */
380 case PTRACE_KILL:
381 ret = 0;
382 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
383 break;
384 child->exit_code = SIGKILL;
385 ptrace_disable(child);
386 wake_up_process(child);
387 break;
388
389 case PTRACE_SINGLESTEP: /* set the trap flag. */
390 pr_debug("ptrace: single step\n");
391 ret = -EIO;
392 if (!valid_signal(data))
393 break;
394 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
395 ptrace_enable(child);
396 child->exit_code = data;
397 wake_up_process(child);
398 ret = 0;
399 break; 372 break;
400 373
401 case PTRACE_GETREGS: 374 case PTRACE_GETREGS:
402 /* Get all gp regs from the child. */ 375 pr_debug("ptrace: PTRACE_GETREGS\n");
403 ret = ptrace_getregs(child, datap); 376 return copy_regset_to_user(child, &user_bfin_native_view,
404 break; 377 REGSET_GENERAL,
378 0, sizeof(struct pt_regs),
379 (void __user *)data);
405 380
406 case PTRACE_SETREGS: 381 case PTRACE_SETREGS:
407 printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); 382 pr_debug("ptrace: PTRACE_SETREGS\n");
408 /* Set all gp regs in the child. */ 383 return copy_regset_from_user(child, &user_bfin_native_view,
409 ret = 0; 384 REGSET_GENERAL,
410 break; 385 0, sizeof(struct pt_regs),
386 (const void __user *)data);
411 387
412 default: 388 default:
413 ret = ptrace_request(child, request, addr, data); 389 ret = ptrace_request(child, request, addr, data);
@@ -417,27 +393,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
417 return ret; 393 return ret;
418} 394}
419 395
420asmlinkage void syscall_trace(void) 396asmlinkage int syscall_trace_enter(struct pt_regs *regs)
421{ 397{
422 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 398 int ret = 0;
423 return; 399
424 400 if (test_thread_flag(TIF_SYSCALL_TRACE))
425 if (!(current->ptrace & PT_PTRACED)) 401 ret = tracehook_report_syscall_entry(regs);
426 return; 402
427 403 return ret;
428 /* the 0x80 provides a way for the tracing parent to distinguish 404}
429 * between a syscall stop and SIGTRAP delivery 405
430 */ 406asmlinkage void syscall_trace_leave(struct pt_regs *regs)
431 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 407{
432 ? 0x80 : 0)); 408 int step;
433 409
434 /* 410 step = test_thread_flag(TIF_SINGLESTEP);
435 * this isn't the same as continuing with a signal, but it will do 411 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
436 * for normal use. strace only continues with a signal if the 412 tracehook_report_syscall_exit(regs, step);
437 * stopping signal is not SIGTRAP. -brl
438 */
439 if (current->exit_code) {
440 send_sig(current->exit_code, current, 1);
441 current->exit_code = 0;
442 }
443} 413}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 95448ae9c43a..8e2efceb364b 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -220,6 +220,16 @@ void __init bfin_relocate_l1_mem(void)
220 memcpy(_stext_l2, _l2_lma, l2_len); 220 memcpy(_stext_l2, _l2_lma, l2_len);
221} 221}
222 222
223#ifdef CONFIG_ROMKERNEL
224void __init bfin_relocate_xip_data(void)
225{
226 early_shadow_stamp();
227
228 memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
229 memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
230}
231#endif
232
223/* add_memory_region to memmap */ 233/* add_memory_region to memmap */
224static void __init add_memory_region(unsigned long long start, 234static void __init add_memory_region(unsigned long long start,
225 unsigned long long size, int type) 235 unsigned long long size, int type)
@@ -504,7 +514,7 @@ static __init void memory_setup(void)
504#endif 514#endif
505 unsigned long max_mem; 515 unsigned long max_mem;
506 516
507 _rambase = (unsigned long)_stext; 517 _rambase = CONFIG_BOOT_LOAD;
508 _ramstart = (unsigned long)_end; 518 _ramstart = (unsigned long)_end;
509 519
510 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 520 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
@@ -597,7 +607,12 @@ static __init void memory_setup(void)
597 } 607 }
598 608
599#ifdef CONFIG_MPU 609#ifdef CONFIG_MPU
610#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
611 page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
612 ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
613#else
600 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; 614 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
615#endif
601 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); 616 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
602#endif 617#endif
603 618
@@ -630,7 +645,7 @@ static __init void memory_setup(void)
630 __bss_start, __bss_stop, 645 __bss_start, __bss_stop,
631 _sdata, _edata, 646 _sdata, _edata,
632 (void *)&init_thread_union, 647 (void *)&init_thread_union,
633 (void *)((int)(&init_thread_union) + 0x2000), 648 (void *)((int)(&init_thread_union) + THREAD_SIZE),
634 __init_begin, __init_end, 649 __init_begin, __init_end,
635 (void *)_ramstart, (void *)memory_end 650 (void *)_ramstart, (void *)memory_end
636#ifdef CONFIG_MTD_UCLINUX 651#ifdef CONFIG_MTD_UCLINUX
@@ -792,10 +807,17 @@ static inline int __init get_mem_size(void)
792 BUG(); 807 BUG();
793} 808}
794 809
810__attribute__((weak))
811void __init native_machine_early_platform_add_devices(void)
812{
813}
814
795void __init setup_arch(char **cmdline_p) 815void __init setup_arch(char **cmdline_p)
796{ 816{
797 unsigned long sclk, cclk; 817 unsigned long sclk, cclk;
798 818
819 native_machine_early_platform_add_devices();
820
799 enable_shadow_console(); 821 enable_shadow_console();
800 822
801 /* Check to make sure we are running on the right processor */ 823 /* Check to make sure we are running on the right processor */
@@ -1217,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1217 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1239 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1218 BFIN_DLINES); 1240 BFIN_DLINES);
1219#ifdef __ARCH_SYNC_CORE_DCACHE 1241#ifdef __ARCH_SYNC_CORE_DCACHE
1220 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); 1242 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]);
1221#endif 1243#endif
1222#ifdef __ARCH_SYNC_CORE_ICACHE 1244#ifdef __ARCH_SYNC_CORE_ICACHE
1223 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); 1245 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]);
1224#endif 1246#endif
1225 1247
1226 if (cpu_num != num_possible_cpus() - 1) 1248 if (cpu_num != num_possible_cpus() - 1)
@@ -1249,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1249 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1271 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
1250 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1272 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
1251 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1273 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
1252 ((int)memory_end - (int)_stext) >> 10, 1274 ((int)memory_end - (int)_rambase) >> 10,
1253 _stext, 1275 (void *)_rambase,
1254 (void *)memory_end); 1276 (void *)memory_end);
1255 seq_printf(m, "\n"); 1277 seq_printf(m, "\n");
1256 1278
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index e0fd63e9e38a..d536f35d1f43 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
@@ -17,6 +17,7 @@
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18#include <asm/ucontext.h> 18#include <asm/ucontext.h>
19#include <asm/fixed_code.h> 19#include <asm/fixed_code.h>
20#include <asm/syscall.h>
20 21
21#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 22#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
22 23
@@ -50,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p
50 unsigned long usp = 0; 51 unsigned long usp = 0;
51 int err = 0; 52 int err = 0;
52 53
54 /* Always make any pending restarted system calls return -EINTR */
55 current_thread_info()->restart_block.fn = do_no_restart_syscall;
56
53#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) 57#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x)
54 58
55 /* restore passed registers */ 59 /* restore passed registers */
@@ -206,16 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
206 regs->r1 = (unsigned long)(&frame->info); 210 regs->r1 = (unsigned long)(&frame->info);
207 regs->r2 = (unsigned long)(&frame->uc); 211 regs->r2 = (unsigned long)(&frame->uc);
208 212
209 /*
210 * Clear the trace flag when entering the signal handler, but
211 * notify any tracer that was single-stepping it. The tracer
212 * may want to single-step inside the handler too.
213 */
214 if (regs->syscfg & TRACE_BITS) {
215 regs->syscfg &= ~TRACE_BITS;
216 ptrace_notify(SIGTRAP);
217 }
218
219 return 0; 213 return 0;
220 214
221 give_sigsegv: 215 give_sigsegv:
@@ -247,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
247 regs->r0 = regs->orig_r0; 241 regs->r0 = regs->orig_r0;
248 regs->pc -= 2; 242 regs->pc -= 2;
249 break; 243 break;
244
245 case -ERESTART_RESTARTBLOCK:
246 regs->p0 = __NR_restart_syscall;
247 regs->pc -= 2;
248 break;
250 } 249 }
251} 250}
252 251
@@ -315,6 +314,9 @@ asmlinkage void do_signal(struct pt_regs *regs)
315 * clear the TIF_RESTORE_SIGMASK flag */ 314 * clear the TIF_RESTORE_SIGMASK flag */
316 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 315 if (test_thread_flag(TIF_RESTORE_SIGMASK))
317 clear_thread_flag(TIF_RESTORE_SIGMASK); 316 clear_thread_flag(TIF_RESTORE_SIGMASK);
317
318 tracehook_signal_handler(signr, &info, &ka, regs,
319 test_thread_flag(TIF_SINGLESTEP));
318 } 320 }
319 321
320 return; 322 return;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 17c38c5b5b22..cb7a01d4f009 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -21,6 +21,7 @@
21#include <asm/blackfin.h> 21#include <asm/blackfin.h>
22#include <asm/time.h> 22#include <asm/time.h>
23#include <asm/gptimers.h> 23#include <asm/gptimers.h>
24#include <asm/nmi.h>
24 25
25/* Accelerators for sched_clock() 26/* Accelerators for sched_clock()
26 * convert from cycles(64bits) => nanoseconds (64bits) 27 * convert from cycles(64bits) => nanoseconds (64bits)
@@ -50,7 +51,11 @@
50 51
51static notrace cycle_t bfin_read_cycles(struct clocksource *cs) 52static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
52{ 53{
54#ifdef CONFIG_CPU_FREQ
53 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); 55 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
56#else
57 return get_cycles();
58#endif
54} 59}
55 60
56static struct clocksource bfin_cs_cycles = { 61static struct clocksource bfin_cs_cycles = {
@@ -132,7 +137,6 @@ static int __init bfin_cs_gptimer0_init(void)
132# define bfin_cs_gptimer0_init() 137# define bfin_cs_gptimer0_init()
133#endif 138#endif
134 139
135
136#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) 140#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
137/* prefer to use cycles since it has higher rating */ 141/* prefer to use cycles since it has higher rating */
138notrace unsigned long long sched_clock(void) 142notrace unsigned long long sched_clock(void)
@@ -145,47 +149,8 @@ notrace unsigned long long sched_clock(void)
145} 149}
146#endif 150#endif
147 151
148#ifdef CONFIG_CORE_TIMER_IRQ_L1
149__attribute__((l1_text))
150#endif
151irqreturn_t timer_interrupt(int irq, void *dev_id);
152
153static int bfin_timer_set_next_event(unsigned long, \
154 struct clock_event_device *);
155
156static void bfin_timer_set_mode(enum clock_event_mode, \
157 struct clock_event_device *);
158
159static struct clock_event_device clockevent_bfin = {
160#if defined(CONFIG_TICKSOURCE_GPTMR0)
161 .name = "bfin_gptimer0",
162 .rating = 300,
163 .irq = IRQ_TIMER0,
164#else
165 .name = "bfin_core_timer",
166 .rating = 350,
167 .irq = IRQ_CORETMR,
168#endif
169 .shift = 32,
170 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
171 .set_next_event = bfin_timer_set_next_event,
172 .set_mode = bfin_timer_set_mode,
173};
174
175static struct irqaction bfin_timer_irq = {
176#if defined(CONFIG_TICKSOURCE_GPTMR0)
177 .name = "Blackfin GPTimer0",
178#else
179 .name = "Blackfin CoreTimer",
180#endif
181 .flags = IRQF_DISABLED | IRQF_TIMER | \
182 IRQF_IRQPOLL | IRQF_PERCPU,
183 .handler = timer_interrupt,
184 .dev_id = &clockevent_bfin,
185};
186
187#if defined(CONFIG_TICKSOURCE_GPTMR0) 152#if defined(CONFIG_TICKSOURCE_GPTMR0)
188static int bfin_timer_set_next_event(unsigned long cycles, 153static int bfin_gptmr0_set_next_event(unsigned long cycles,
189 struct clock_event_device *evt) 154 struct clock_event_device *evt)
190{ 155{
191 disable_gptimers(TIMER0bit); 156 disable_gptimers(TIMER0bit);
@@ -196,7 +161,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
196 return 0; 161 return 0;
197} 162}
198 163
199static void bfin_timer_set_mode(enum clock_event_mode mode, 164static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
200 struct clock_event_device *evt) 165 struct clock_event_device *evt)
201{ 166{
202 switch (mode) { 167 switch (mode) {
@@ -224,25 +189,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
224 } 189 }
225} 190}
226 191
227static void bfin_timer_ack(void) 192static void bfin_gptmr0_ack(void)
228{ 193{
229 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); 194 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
230} 195}
231 196
232static void __init bfin_timer_init(void) 197static void __init bfin_gptmr0_init(void)
233{ 198{
234 disable_gptimers(TIMER0bit); 199 disable_gptimers(TIMER0bit);
235} 200}
236 201
237static unsigned long __init bfin_clockevent_check(void) 202#ifdef CONFIG_CORE_TIMER_IRQ_L1
203__attribute__((l1_text))
204#endif
205irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
238{ 206{
239 setup_irq(IRQ_TIMER0, &bfin_timer_irq); 207 struct clock_event_device *evt = dev_id;
240 return get_sclk(); 208 smp_mb();
209 evt->event_handler(evt);
210 bfin_gptmr0_ack();
211 return IRQ_HANDLED;
241} 212}
242 213
243#else /* CONFIG_TICKSOURCE_CORETMR */ 214static struct irqaction gptmr0_irq = {
215 .name = "Blackfin GPTimer0",
216 .flags = IRQF_DISABLED | IRQF_TIMER | \
217 IRQF_IRQPOLL | IRQF_PERCPU,
218 .handler = bfin_gptmr0_interrupt,
219};
244 220
245static int bfin_timer_set_next_event(unsigned long cycles, 221static struct clock_event_device clockevent_gptmr0 = {
222 .name = "bfin_gptimer0",
223 .rating = 300,
224 .irq = IRQ_TIMER0,
225 .shift = 32,
226 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
227 .set_next_event = bfin_gptmr0_set_next_event,
228 .set_mode = bfin_gptmr0_set_mode,
229};
230
231static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
232{
233 unsigned long clock_tick;
234
235 clock_tick = get_sclk();
236 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
237 evt->max_delta_ns = clockevent_delta2ns(-1, evt);
238 evt->min_delta_ns = clockevent_delta2ns(100, evt);
239
240 evt->cpumask = cpumask_of(0);
241
242 clockevents_register_device(evt);
243}
244#endif /* CONFIG_TICKSOURCE_GPTMR0 */
245
246#if defined(CONFIG_TICKSOURCE_CORETMR)
247/* per-cpu local core timer */
248static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
249
250static int bfin_coretmr_set_next_event(unsigned long cycles,
246 struct clock_event_device *evt) 251 struct clock_event_device *evt)
247{ 252{
248 bfin_write_TCNTL(TMPWR); 253 bfin_write_TCNTL(TMPWR);
@@ -253,7 +258,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
253 return 0; 258 return 0;
254} 259}
255 260
256static void bfin_timer_set_mode(enum clock_event_mode mode, 261static void bfin_coretmr_set_mode(enum clock_event_mode mode,
257 struct clock_event_device *evt) 262 struct clock_event_device *evt)
258{ 263{
259 switch (mode) { 264 switch (mode) {
@@ -285,19 +290,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
285 } 290 }
286} 291}
287 292
288static void bfin_timer_ack(void) 293void bfin_coretmr_init(void)
289{
290}
291
292static void __init bfin_timer_init(void)
293{ 294{
294 /* power up the timer, but don't enable it just yet */ 295 /* power up the timer, but don't enable it just yet */
295 bfin_write_TCNTL(TMPWR); 296 bfin_write_TCNTL(TMPWR);
296 CSYNC(); 297 CSYNC();
297 298
298 /* 299 /* the TSCALE prescaler counter. */
299 * the TSCALE prescaler counter.
300 */
301 bfin_write_TSCALE(TIME_SCALE - 1); 300 bfin_write_TSCALE(TIME_SCALE - 1);
302 bfin_write_TPERIOD(0); 301 bfin_write_TPERIOD(0);
303 bfin_write_TCOUNT(0); 302 bfin_write_TCOUNT(0);
@@ -305,48 +304,54 @@ static void __init bfin_timer_init(void)
305 CSYNC(); 304 CSYNC();
306} 305}
307 306
308static unsigned long __init bfin_clockevent_check(void) 307#ifdef CONFIG_CORE_TIMER_IRQ_L1
309{ 308__attribute__((l1_text))
310 setup_irq(IRQ_CORETMR, &bfin_timer_irq); 309#endif
311 return get_cclk() / TIME_SCALE; 310irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
312}
313
314void __init setup_core_timer(void)
315{ 311{
316 bfin_timer_init(); 312 int cpu = smp_processor_id();
317 bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); 313 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
318}
319#endif /* CONFIG_TICKSOURCE_GPTMR0 */
320 314
321/*
322 * timer_interrupt() needs to keep up the real-time clock,
323 * as well as call the "do_timer()" routine every clocktick
324 */
325irqreturn_t timer_interrupt(int irq, void *dev_id)
326{
327 struct clock_event_device *evt = dev_id;
328 smp_mb(); 315 smp_mb();
329 evt->event_handler(evt); 316 evt->event_handler(evt);
330 bfin_timer_ack();
331 return IRQ_HANDLED;
332}
333
334static int __init bfin_clockevent_init(void)
335{
336 unsigned long timer_clk;
337 317
338 timer_clk = bfin_clockevent_check(); 318 touch_nmi_watchdog();
339 319
340 bfin_timer_init(); 320 return IRQ_HANDLED;
321}
341 322
342 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 323static struct irqaction coretmr_irq = {
343 clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); 324 .name = "Blackfin CoreTimer",
344 clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); 325 .flags = IRQF_DISABLED | IRQF_TIMER | \
345 clockevent_bfin.cpumask = cpumask_of(0); 326 IRQF_IRQPOLL | IRQF_PERCPU,
346 clockevents_register_device(&clockevent_bfin); 327 .handler = bfin_coretmr_interrupt,
328};
347 329
348 return 0; 330void bfin_coretmr_clockevent_init(void)
331{
332 unsigned long clock_tick;
333 unsigned int cpu = smp_processor_id();
334 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
335
336 evt->name = "bfin_core_timer";
337 evt->rating = 350;
338 evt->irq = -1;
339 evt->shift = 32;
340 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
341 evt->set_next_event = bfin_coretmr_set_next_event;
342 evt->set_mode = bfin_coretmr_set_mode;
343
344 clock_tick = get_cclk() / TIME_SCALE;
345 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
346 evt->max_delta_ns = clockevent_delta2ns(-1, evt);
347 evt->min_delta_ns = clockevent_delta2ns(100, evt);
348
349 evt->cpumask = cpumask_of(cpu);
350
351 clockevents_register_device(evt);
349} 352}
353#endif /* CONFIG_TICKSOURCE_CORETMR */
354
350 355
351void __init time_init(void) 356void __init time_init(void)
352{ 357{
@@ -370,5 +375,21 @@ void __init time_init(void)
370 375
371 bfin_cs_cycles_init(); 376 bfin_cs_cycles_init();
372 bfin_cs_gptimer0_init(); 377 bfin_cs_gptimer0_init();
373 bfin_clockevent_init(); 378
379#if defined(CONFIG_TICKSOURCE_CORETMR)
380 bfin_coretmr_init();
381 setup_irq(IRQ_CORETMR, &coretmr_irq);
382 bfin_coretmr_clockevent_init();
383#endif
384
385#if defined(CONFIG_TICKSOURCE_GPTMR0)
386 bfin_gptmr0_init();
387 setup_irq(IRQ_TIMER0, &gptmr0_irq);
388 gptmr0_irq.dev_id = &clockevent_gptmr0;
389 bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
390#endif
391
392#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
393# error at least one clock event device is required
394#endif
374} 395}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d3cbcd6bd985..ba70c4bc2699 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -138,6 +138,12 @@ static void decode_address(char *buf, unsigned long address)
138 if (!mm) 138 if (!mm)
139 continue; 139 continue;
140 140
141 if (!down_read_trylock(&mm->mmap_sem)) {
142 if (!in_atomic)
143 mmput(mm);
144 continue;
145 }
146
141 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 147 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
142 struct vm_area_struct *vma; 148 struct vm_area_struct *vma;
143 149
@@ -177,6 +183,7 @@ static void decode_address(char *buf, unsigned long address)
177 sprintf(buf, "[ %s vma:0x%lx-0x%lx]", 183 sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
178 name, vma->vm_start, vma->vm_end); 184 name, vma->vm_start, vma->vm_end);
179 185
186 up_read(&mm->mmap_sem);
180 if (!in_atomic) 187 if (!in_atomic)
181 mmput(mm); 188 mmput(mm);
182 189
@@ -186,11 +193,16 @@ static void decode_address(char *buf, unsigned long address)
186 goto done; 193 goto done;
187 } 194 }
188 } 195 }
196
197 up_read(&mm->mmap_sem);
189 if (!in_atomic) 198 if (!in_atomic)
190 mmput(mm); 199 mmput(mm);
191 } 200 }
192 201
193 /* we were unable to find this address anywhere */ 202 /*
203 * we were unable to find this address anywhere,
204 * or some MMs were skipped because they were in use.
205 */
194 sprintf(buf, "/* kernel dynamic memory */"); 206 sprintf(buf, "/* kernel dynamic memory */");
195 207
196done: 208done:
@@ -248,9 +260,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
248#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON 260#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
249 int j; 261 int j;
250#endif 262#endif
251#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
252 unsigned int cpu = raw_smp_processor_id(); 263 unsigned int cpu = raw_smp_processor_id();
253#endif
254 const char *strerror = NULL; 264 const char *strerror = NULL;
255 int sig = 0; 265 int sig = 0;
256 siginfo_t info; 266 siginfo_t info;
@@ -639,7 +649,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
639 { 649 {
640 info.si_signo = sig; 650 info.si_signo = sig;
641 info.si_errno = 0; 651 info.si_errno = 0;
642 info.si_addr = (void __user *)fp->pc; 652 switch (trapnr) {
653 case VEC_CPLB_VL:
654 case VEC_MISALI_D:
655 case VEC_CPLB_M:
656 case VEC_CPLB_MHIT:
657 info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr;
658 break;
659 default:
660 info.si_addr = (void __user *)fp->pc;
661 break;
662 }
643 force_sig_info(sig, &info, current); 663 force_sig_info(sig, &info, current);
644 } 664 }
645 665
@@ -712,7 +732,7 @@ static void decode_instruction(unsigned short *address)
712 verbose_printk("RTE"); 732 verbose_printk("RTE");
713 else if (opcode == 0x0025) 733 else if (opcode == 0x0025)
714 verbose_printk("EMUEXCPT"); 734 verbose_printk("EMUEXCPT");
715 else if (opcode == 0x0040 && opcode <= 0x0047) 735 else if (opcode >= 0x0040 && opcode <= 0x0047)
716 verbose_printk("STI R%i", opcode & 7); 736 verbose_printk("STI R%i", opcode & 7);
717 else if (opcode >= 0x0050 && opcode <= 0x0057) 737 else if (opcode >= 0x0050 && opcode <= 0x0057)
718 verbose_printk("JUMP (P%i)", opcode & 7); 738 verbose_printk("JUMP (P%i)", opcode & 7);
@@ -1096,7 +1116,7 @@ void dump_bfin_mem(struct pt_regs *fp)
1096 /* And the last RETI points to the current userspace context */ 1116 /* And the last RETI points to the current userspace context */
1097 if ((fp + 1)->pc >= current->mm->start_code && 1117 if ((fp + 1)->pc >= current->mm->start_code &&
1098 (fp + 1)->pc <= current->mm->end_code) { 1118 (fp + 1)->pc <= current->mm->end_code) {
1099 verbose_printk(KERN_NOTICE "It might be better to look around here : \n"); 1119 verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
1100 verbose_printk(KERN_NOTICE "-------------------------------------------\n"); 1120 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
1101 show_regs(fp + 1); 1121 show_regs(fp + 1);
1102 verbose_printk(KERN_NOTICE "-------------------------------------------\n"); 1122 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 66799e763dc9..984c78172397 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -15,7 +15,12 @@ _jiffies = _jiffies_64;
15 15
16SECTIONS 16SECTIONS
17{ 17{
18#ifdef CONFIG_RAMKERNEL
18 . = CONFIG_BOOT_LOAD; 19 . = CONFIG_BOOT_LOAD;
20#else
21 . = CONFIG_ROM_BASE;
22#endif
23
19 /* Neither the text, ro_data or bss section need to be aligned 24 /* Neither the text, ro_data or bss section need to be aligned
20 * So pack them back to back 25 * So pack them back to back
21 */ 26 */
@@ -31,6 +36,12 @@ SECTIONS
31 LOCK_TEXT 36 LOCK_TEXT
32 IRQENTRY_TEXT 37 IRQENTRY_TEXT
33 KPROBES_TEXT 38 KPROBES_TEXT
39#ifdef CONFIG_ROMKERNEL
40 __sinittext = .;
41 INIT_TEXT
42 __einittext = .;
43 EXIT_TEXT
44#endif
34 *(.text.*) 45 *(.text.*)
35 *(.fixup) 46 *(.fixup)
36 47
@@ -50,8 +61,14 @@ SECTIONS
50 61
51 /* Just in case the first read only is a 32-bit access */ 62 /* Just in case the first read only is a 32-bit access */
52 RO_DATA(4) 63 RO_DATA(4)
64 __rodata_end = .;
53 65
66#ifdef CONFIG_ROMKERNEL
67 . = CONFIG_BOOT_LOAD;
68 .bss : AT(__rodata_end)
69#else
54 .bss : 70 .bss :
71#endif
55 { 72 {
56 . = ALIGN(4); 73 . = ALIGN(4);
57 ___bss_start = .; 74 ___bss_start = .;
@@ -67,7 +84,11 @@ SECTIONS
67 ___bss_stop = .; 84 ___bss_stop = .;
68 } 85 }
69 86
87#if defined(CONFIG_ROMKERNEL)
88 .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
89#else
70 .data : 90 .data :
91#endif
71 { 92 {
72 __sdata = .; 93 __sdata = .;
73 /* This gets done first, so the glob doesn't suck it in */ 94 /* This gets done first, so the glob doesn't suck it in */
@@ -94,6 +115,8 @@ SECTIONS
94 115
95 __edata = .; 116 __edata = .;
96 } 117 }
118 __data_lma = LOADADDR(.data);
119 __data_len = SIZEOF(.data);
97 120
98 /* The init section should be last, so when we free it, it goes into 121 /* The init section should be last, so when we free it, it goes into
99 * the general memory pool, and (hopefully) will decrease fragmentation 122 * the general memory pool, and (hopefully) will decrease fragmentation
@@ -103,25 +126,58 @@ SECTIONS
103 . = ALIGN(PAGE_SIZE); 126 . = ALIGN(PAGE_SIZE);
104 ___init_begin = .; 127 ___init_begin = .;
105 128
129#ifdef CONFIG_RAMKERNEL
106 INIT_TEXT_SECTION(PAGE_SIZE) 130 INIT_TEXT_SECTION(PAGE_SIZE)
107 . = ALIGN(16);
108 INIT_DATA_SECTION(16)
109 PERCPU(4)
110 131
111 /* we have to discard exit text and such at runtime, not link time, to 132 /* We have to discard exit text and such at runtime, not link time, to
112 * handle embedded cross-section references (alt instructions, bug 133 * handle embedded cross-section references (alt instructions, bug
113 * table, eh_frame, etc...) 134 * table, eh_frame, etc...). We need all of our .text up front and
135 * .data after it for PCREL call issues.
114 */ 136 */
115 .exit.text : 137 .exit.text :
116 { 138 {
117 EXIT_TEXT 139 EXIT_TEXT
118 } 140 }
141
142 . = ALIGN(16);
143 INIT_DATA_SECTION(16)
144 PERCPU(4)
145
119 .exit.data : 146 .exit.data :
120 { 147 {
121 EXIT_DATA 148 EXIT_DATA
122 } 149 }
123 150
124 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) 151 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
152#else
153 .init.data : AT(__data_lma + __data_len)
154 {
155 __sinitdata = .;
156 INIT_DATA
157 INIT_SETUP(16)
158 INIT_CALLS
159 CON_INITCALL
160 SECURITY_INITCALL
161 INIT_RAM_FS
162
163 . = ALIGN(4);
164 ___per_cpu_load = .;
165 ___per_cpu_start = .;
166 *(.data.percpu.first)
167 *(.data.percpu.page_aligned)
168 *(.data.percpu)
169 *(.data.percpu.shared_aligned)
170 ___per_cpu_end = .;
171
172 EXIT_DATA
173 __einitdata = .;
174 }
175 __init_data_lma = LOADADDR(.init.data);
176 __init_data_len = SIZEOF(.init.data);
177 __init_data_end = .;
178
179 .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
180#endif
125 { 181 {
126 . = ALIGN(4); 182 . = ALIGN(4);
127 __stext_l1 = .; 183 __stext_l1 = .;
@@ -202,7 +258,11 @@ SECTIONS
202 /* Force trailing alignment of our init section so that when we 258 /* Force trailing alignment of our init section so that when we
203 * free our init memory, we don't leave behind a partial page. 259 * free our init memory, we don't leave behind a partial page.
204 */ 260 */
261#ifdef CONFIG_RAMKERNEL
205 . = __l2_lma + __l2_len; 262 . = __l2_lma + __l2_len;
263#else
264 . = __init_data_end;
265#endif
206 . = ALIGN(PAGE_SIZE); 266 . = ALIGN(PAGE_SIZE);
207 ___init_end = .; 267 ___init_end = .;
208 268
diff --git a/arch/blackfin/mach-bf518/boards/Kconfig b/arch/blackfin/mach-bf518/boards/Kconfig
index 96163514ed22..252261ec04c4 100644
--- a/arch/blackfin/mach-bf518/boards/Kconfig
+++ b/arch/blackfin/mach-bf518/boards/Kconfig
@@ -9,4 +9,9 @@ config BFIN518F_EZBRD
9 help 9 help
10 BF518-EZBRD board support. 10 BF518-EZBRD board support.
11 11
12config BFIN518F_TCM
13 bool "Bluetechnix TCM-BF518"
14 help
15 Bluetechnix TCM-BF518 board support.
16
12endchoice 17endchoice
diff --git a/arch/blackfin/mach-bf518/boards/Makefile b/arch/blackfin/mach-bf518/boards/Makefile
index 172e859c3a7f..a9ef25c6b302 100644
--- a/arch/blackfin/mach-bf518/boards/Makefile
+++ b/arch/blackfin/mach-bf518/boards/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_BFIN518F_EZBRD) += ezbrd.o 5obj-$(CONFIG_BFIN518F_EZBRD) += ezbrd.o
6obj-$(CONFIG_BFIN518F_TCM) += tcm-bf518.o
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 01975c017116..44d6d5299022 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -382,30 +382,93 @@ static struct platform_device bfin_spi1_device = {
382#endif /* spi master and devices */ 382#endif /* spi master and devices */
383 383
384#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 384#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
385static struct resource bfin_uart_resources[] = {
386#ifdef CONFIG_SERIAL_BFIN_UART0 385#ifdef CONFIG_SERIAL_BFIN_UART0
386static struct resource bfin_uart0_resources[] = {
387 { 387 {
388 .start = 0xFFC00400, 388 .start = UART0_THR,
389 .end = 0xFFC004FF, 389 .end = UART0_GCTL+2,
390 .flags = IORESOURCE_MEM, 390 .flags = IORESOURCE_MEM,
391 }, 391 },
392 {
393 .start = IRQ_UART0_RX,
394 .end = IRQ_UART0_RX+1,
395 .flags = IORESOURCE_IRQ,
396 },
397 {
398 .start = IRQ_UART0_ERROR,
399 .end = IRQ_UART0_ERROR,
400 .flags = IORESOURCE_IRQ,
401 },
402 {
403 .start = CH_UART0_TX,
404 .end = CH_UART0_TX,
405 .flags = IORESOURCE_DMA,
406 },
407 {
408 .start = CH_UART0_RX,
409 .end = CH_UART0_RX,
410 .flags = IORESOURCE_DMA,
411 },
412};
413
414unsigned short bfin_uart0_peripherals[] = {
415 P_UART0_TX, P_UART0_RX, 0
416};
417
418static struct platform_device bfin_uart0_device = {
419 .name = "bfin-uart",
420 .id = 0,
421 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
422 .resource = bfin_uart0_resources,
423 .dev = {
424 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
425 },
426};
392#endif 427#endif
393#ifdef CONFIG_SERIAL_BFIN_UART1 428#ifdef CONFIG_SERIAL_BFIN_UART1
429static struct resource bfin_uart1_resources[] = {
394 { 430 {
395 .start = 0xFFC02000, 431 .start = UART1_THR,
396 .end = 0xFFC020FF, 432 .end = UART1_GCTL+2,
397 .flags = IORESOURCE_MEM, 433 .flags = IORESOURCE_MEM,
398 }, 434 },
399#endif 435 {
436 .start = IRQ_UART1_RX,
437 .end = IRQ_UART1_RX+1,
438 .flags = IORESOURCE_IRQ,
439 },
440 {
441 .start = IRQ_UART1_ERROR,
442 .end = IRQ_UART1_ERROR,
443 .flags = IORESOURCE_IRQ,
444 },
445 {
446 .start = CH_UART1_TX,
447 .end = CH_UART1_TX,
448 .flags = IORESOURCE_DMA,
449 },
450 {
451 .start = CH_UART1_RX,
452 .end = CH_UART1_RX,
453 .flags = IORESOURCE_DMA,
454 },
455};
456
457unsigned short bfin_uart1_peripherals[] = {
458 P_UART1_TX, P_UART1_RX, 0
400}; 459};
401 460
402static struct platform_device bfin_uart_device = { 461static struct platform_device bfin_uart1_device = {
403 .name = "bfin-uart", 462 .name = "bfin-uart",
404 .id = 1, 463 .id = 1,
405 .num_resources = ARRAY_SIZE(bfin_uart_resources), 464 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
406 .resource = bfin_uart_resources, 465 .resource = bfin_uart1_resources,
466 .dev = {
467 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
468 },
407}; 469};
408#endif 470#endif
471#endif
409 472
410#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 473#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
411#ifdef CONFIG_BFIN_SIR0 474#ifdef CONFIG_BFIN_SIR0
@@ -499,16 +562,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
499}; 562};
500 563
501#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 564#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
565#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
566static struct resource bfin_sport0_uart_resources[] = {
567 {
568 .start = SPORT0_TCR1,
569 .end = SPORT0_MRCS3+4,
570 .flags = IORESOURCE_MEM,
571 },
572 {
573 .start = IRQ_SPORT0_RX,
574 .end = IRQ_SPORT0_RX+1,
575 .flags = IORESOURCE_IRQ,
576 },
577 {
578 .start = IRQ_SPORT0_ERROR,
579 .end = IRQ_SPORT0_ERROR,
580 .flags = IORESOURCE_IRQ,
581 },
582};
583
584unsigned short bfin_sport0_peripherals[] = {
585 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
586 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
587};
588
502static struct platform_device bfin_sport0_uart_device = { 589static struct platform_device bfin_sport0_uart_device = {
503 .name = "bfin-sport-uart", 590 .name = "bfin-sport-uart",
504 .id = 0, 591 .id = 0,
592 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
593 .resource = bfin_sport0_uart_resources,
594 .dev = {
595 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
596 },
597};
598#endif
599#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
600static struct resource bfin_sport1_uart_resources[] = {
601 {
602 .start = SPORT1_TCR1,
603 .end = SPORT1_MRCS3+4,
604 .flags = IORESOURCE_MEM,
605 },
606 {
607 .start = IRQ_SPORT1_RX,
608 .end = IRQ_SPORT1_RX+1,
609 .flags = IORESOURCE_IRQ,
610 },
611 {
612 .start = IRQ_SPORT1_ERROR,
613 .end = IRQ_SPORT1_ERROR,
614 .flags = IORESOURCE_IRQ,
615 },
616};
617
618unsigned short bfin_sport1_peripherals[] = {
619 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
620 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
505}; 621};
506 622
507static struct platform_device bfin_sport1_uart_device = { 623static struct platform_device bfin_sport1_uart_device = {
508 .name = "bfin-sport-uart", 624 .name = "bfin-sport-uart",
509 .id = 1, 625 .id = 1,
626 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
627 .resource = bfin_sport1_uart_resources,
628 .dev = {
629 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
630 },
510}; 631};
511#endif 632#endif
633#endif
512 634
513#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 635#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
514#include <linux/input.h> 636#include <linux/input.h>
@@ -593,7 +715,12 @@ static struct platform_device *stamp_devices[] __initdata = {
593#endif 715#endif
594 716
595#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 717#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
596 &bfin_uart_device, 718#ifdef CONFIG_SERIAL_BFIN_UART0
719 &bfin_uart0_device,
720#endif
721#ifdef CONFIG_SERIAL_BFIN_UART1
722 &bfin_uart1_device,
723#endif
597#endif 724#endif
598 725
599#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 726#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -610,9 +737,13 @@ static struct platform_device *stamp_devices[] __initdata = {
610#endif 737#endif
611 738
612#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 739#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
740#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
613 &bfin_sport0_uart_device, 741 &bfin_sport0_uart_device,
742#endif
743#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
614 &bfin_sport1_uart_device, 744 &bfin_sport1_uart_device,
615#endif 745#endif
746#endif
616 747
617#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 748#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
618 &bfin_device_gpiokeys, 749 &bfin_device_gpiokeys,
@@ -644,6 +775,33 @@ static int __init ezbrd_init(void)
644 775
645arch_initcall(ezbrd_init); 776arch_initcall(ezbrd_init);
646 777
778static struct platform_device *ezbrd_early_devices[] __initdata = {
779#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
780#ifdef CONFIG_SERIAL_BFIN_UART0
781 &bfin_uart0_device,
782#endif
783#ifdef CONFIG_SERIAL_BFIN_UART1
784 &bfin_uart1_device,
785#endif
786#endif
787
788#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
789#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
790 &bfin_sport0_uart_device,
791#endif
792#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
793 &bfin_sport1_uart_device,
794#endif
795#endif
796};
797
798void __init native_machine_early_platform_add_devices(void)
799{
800 printk(KERN_INFO "register early platform devices\n");
801 early_platform_add_devices(ezbrd_early_devices,
802 ARRAY_SIZE(ezbrd_early_devices));
803}
804
647void native_machine_restart(char *cmd) 805void native_machine_restart(char *cmd)
648{ 806{
649 /* workaround reboot hang when booting from SPI */ 807 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
new file mode 100644
index 000000000000..9b72e5cb21fe
--- /dev/null
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -0,0 +1,753 @@
1/*
2 * Copyright 2004-2009 Analog Devices Inc.
3 * 2005 National ICT Australia (NICTA)
4 * Aidan Williams <aidan@nicta.com.au>
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/device.h>
10#include <linux/etherdevice.h>
11#include <linux/platform_device.h>
12#include <linux/mtd/mtd.h>
13#include <linux/mtd/partitions.h>
14#include <linux/mtd/physmap.h>
15#include <linux/spi/spi.h>
16#include <linux/spi/flash.h>
17
18#include <linux/i2c.h>
19#include <linux/irq.h>
20#include <linux/interrupt.h>
21#include <asm/dma.h>
22#include <asm/bfin5xx_spi.h>
23#include <asm/reboot.h>
24#include <asm/portmux.h>
25#include <asm/dpmc.h>
26#include <asm/bfin_sdh.h>
27#include <linux/spi/ad7877.h>
28#include <net/dsa.h>
29
30/*
31 * Name the Board for the /proc/cpuinfo
32 */
33const char bfin_board_name[] = "Bluetechnix TCM-BF518";
34
35/*
36 * Driver needs to know address, irq and flag pin.
37 */
38
39#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
40static struct mtd_partition tcm_partitions[] = {
41 {
42 .name = "bootloader(nor)",
43 .size = 0x40000,
44 .offset = 0,
45 },
46 {
47 .name = "linux(nor)",
48 .size = 0x1C0000,
49 .offset = MTDPART_OFS_APPEND,
50 }
51};
52
53static struct physmap_flash_data tcm_flash_data = {
54 .width = 2,
55 .parts = tcm_partitions,
56 .nr_parts = ARRAY_SIZE(tcm_partitions),
57};
58
59static struct resource tcm_flash_resource = {
60 .start = 0x20000000,
61 .end = 0x201fffff,
62 .flags = IORESOURCE_MEM,
63};
64
65static struct platform_device tcm_flash_device = {
66 .name = "physmap-flash",
67 .id = 0,
68 .dev = {
69 .platform_data = &tcm_flash_data,
70 },
71 .num_resources = 1,
72 .resource = &tcm_flash_resource,
73};
74#endif
75
76#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
77static struct platform_device rtc_device = {
78 .name = "rtc-bfin",
79 .id = -1,
80};
81#endif
82
83#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
84static struct platform_device bfin_mii_bus = {
85 .name = "bfin_mii_bus",
86};
87
88static struct platform_device bfin_mac_device = {
89 .name = "bfin_mac",
90 .dev.platform_data = &bfin_mii_bus,
91};
92#endif
93
94#if defined(CONFIG_MTD_M25P80) \
95 || defined(CONFIG_MTD_M25P80_MODULE)
96static struct mtd_partition bfin_spi_flash_partitions[] = {
97 {
98 .name = "bootloader(spi)",
99 .size = 0x00040000,
100 .offset = 0,
101 .mask_flags = MTD_CAP_ROM
102 }, {
103 .name = "linux kernel(spi)",
104 .size = MTDPART_SIZ_FULL,
105 .offset = MTDPART_OFS_APPEND,
106 }
107};
108
109static struct flash_platform_data bfin_spi_flash_data = {
110 .name = "m25p80",
111 .parts = bfin_spi_flash_partitions,
112 .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
113 .type = "m25p16",
114};
115
116/* SPI flash chip (m25p64) */
117static struct bfin5xx_spi_chip spi_flash_chip_info = {
118 .enable_dma = 0, /* use dma transfer with this chip*/
119 .bits_per_word = 8,
120};
121#endif
122
123#if defined(CONFIG_BFIN_SPI_ADC) \
124 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
125/* SPI ADC chip */
126static struct bfin5xx_spi_chip spi_adc_chip_info = {
127 .enable_dma = 1, /* use dma transfer with this chip*/
128 .bits_per_word = 16,
129};
130#endif
131
132#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
133static struct bfin5xx_spi_chip mmc_spi_chip_info = {
134 .enable_dma = 0,
135 .bits_per_word = 8,
136};
137#endif
138
139#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
140static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
141 .enable_dma = 0,
142 .bits_per_word = 16,
143};
144
145static const struct ad7877_platform_data bfin_ad7877_ts_info = {
146 .model = 7877,
147 .vref_delay_usecs = 50, /* internal, no capacitor */
148 .x_plate_ohms = 419,
149 .y_plate_ohms = 486,
150 .pressure_max = 1000,
151 .pressure_min = 0,
152 .stopacq_polarity = 1,
153 .first_conversion_delay = 3,
154 .acquisition_time = 1,
155 .averaging = 1,
156 .pen_down_acc_interval = 1,
157};
158#endif
159
160#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
161 && defined(CONFIG_SND_SOC_WM8731_SPI)
162static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
163 .enable_dma = 0,
164 .bits_per_word = 16,
165};
166#endif
167
168#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
169static struct bfin5xx_spi_chip spidev_chip_info = {
170 .enable_dma = 0,
171 .bits_per_word = 8,
172};
173#endif
174
175static struct spi_board_info bfin_spi_board_info[] __initdata = {
176#if defined(CONFIG_MTD_M25P80) \
177 || defined(CONFIG_MTD_M25P80_MODULE)
178 {
179 /* the modalias must be the same as spi device driver name */
180 .modalias = "m25p80", /* Name of spi_driver for this device */
181 .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
182 .bus_num = 0, /* Framework bus number */
183 .chip_select = 2, /* SPI0_SSEL2 */
184 .platform_data = &bfin_spi_flash_data,
185 .controller_data = &spi_flash_chip_info,
186 .mode = SPI_MODE_3,
187 },
188#endif
189
190#if defined(CONFIG_BFIN_SPI_ADC) \
191 || defined(CONFIG_BFIN_SPI_ADC_MODULE)
192 {
193 .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
194 .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
195 .bus_num = 0, /* Framework bus number */
196 .chip_select = 1, /* Framework chip select. */
197 .platform_data = NULL, /* No spi_driver specific config */
198 .controller_data = &spi_adc_chip_info,
199 },
200#endif
201
202#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
203 {
204 .modalias = "mmc_spi",
205 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
206 .bus_num = 0,
207 .chip_select = 5,
208 .controller_data = &mmc_spi_chip_info,
209 .mode = SPI_MODE_3,
210 },
211#endif
212#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
213 {
214 .modalias = "ad7877",
215 .platform_data = &bfin_ad7877_ts_info,
216 .irq = IRQ_PF8,
217 .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
218 .bus_num = 0,
219 .chip_select = 2,
220 .controller_data = &spi_ad7877_chip_info,
221 },
222#endif
223#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
224 && defined(CONFIG_SND_SOC_WM8731_SPI)
225 {
226 .modalias = "wm8731",
227 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
228 .bus_num = 0,
229 .chip_select = 5,
230 .controller_data = &spi_wm8731_chip_info,
231 .mode = SPI_MODE_0,
232 },
233#endif
234#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
235 {
236 .modalias = "spidev",
237 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
238 .bus_num = 0,
239 .chip_select = 1,
240 .controller_data = &spidev_chip_info,
241 },
242#endif
243#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
244 {
245 .modalias = "bfin-lq035q1-spi",
246 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
247 .bus_num = 0,
248 .chip_select = 1,
249 .controller_data = &lq035q1_spi_chip_info,
250 .mode = SPI_CPHA | SPI_CPOL,
251 },
252#endif
253};
254
255/* SPI controller data */
256#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
257/* SPI (0) */
258static struct bfin5xx_spi_master bfin_spi0_info = {
259 .num_chipselect = 6,
260 .enable_dma = 1, /* master has the ability to do dma transfer */
261 .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
262};
263
264static struct resource bfin_spi0_resource[] = {
265 [0] = {
266 .start = SPI0_REGBASE,
267 .end = SPI0_REGBASE + 0xFF,
268 .flags = IORESOURCE_MEM,
269 },
270 [1] = {
271 .start = CH_SPI0,
272 .end = CH_SPI0,
273 .flags = IORESOURCE_DMA,
274 },
275 [2] = {
276 .start = IRQ_SPI0,
277 .end = IRQ_SPI0,
278 .flags = IORESOURCE_IRQ,
279 },
280};
281
282static struct platform_device bfin_spi0_device = {
283 .name = "bfin-spi",
284 .id = 0, /* Bus number */
285 .num_resources = ARRAY_SIZE(bfin_spi0_resource),
286 .resource = bfin_spi0_resource,
287 .dev = {
288 .platform_data = &bfin_spi0_info, /* Passed to driver */
289 },
290};
291
292/* SPI (1) */
293static struct bfin5xx_spi_master bfin_spi1_info = {
294 .num_chipselect = 5,
295 .enable_dma = 1, /* master has the ability to do dma transfer */
296 .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
297};
298
299static struct resource bfin_spi1_resource[] = {
300 [0] = {
301 .start = SPI1_REGBASE,
302 .end = SPI1_REGBASE + 0xFF,
303 .flags = IORESOURCE_MEM,
304 },
305 [1] = {
306 .start = CH_SPI1,
307 .end = CH_SPI1,
308 .flags = IORESOURCE_DMA,
309 },
310 [2] = {
311 .start = IRQ_SPI1,
312 .end = IRQ_SPI1,
313 .flags = IORESOURCE_IRQ,
314 },
315};
316
317static struct platform_device bfin_spi1_device = {
318 .name = "bfin-spi",
319 .id = 1, /* Bus number */
320 .num_resources = ARRAY_SIZE(bfin_spi1_resource),
321 .resource = bfin_spi1_resource,
322 .dev = {
323 .platform_data = &bfin_spi1_info, /* Passed to driver */
324 },
325};
326#endif /* spi master and devices */
327
328#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
329#ifdef CONFIG_SERIAL_BFIN_UART0
330static struct resource bfin_uart0_resources[] = {
331 {
332 .start = UART0_THR,
333 .end = UART0_GCTL+2,
334 .flags = IORESOURCE_MEM,
335 },
336 {
337 .start = IRQ_UART0_RX,
338 .end = IRQ_UART0_RX+1,
339 .flags = IORESOURCE_IRQ,
340 },
341 {
342 .start = IRQ_UART0_ERROR,
343 .end = IRQ_UART0_ERROR,
344 .flags = IORESOURCE_IRQ,
345 },
346 {
347 .start = CH_UART0_TX,
348 .end = CH_UART0_TX,
349 .flags = IORESOURCE_DMA,
350 },
351 {
352 .start = CH_UART0_RX,
353 .end = CH_UART0_RX,
354 .flags = IORESOURCE_DMA,
355 },
356};
357
358unsigned short bfin_uart0_peripherals[] = {
359 P_UART0_TX, P_UART0_RX, 0
360};
361
362static struct platform_device bfin_uart0_device = {
363 .name = "bfin-uart",
364 .id = 0,
365 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
366 .resource = bfin_uart0_resources,
367 .dev = {
368 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
369 },
370};
371#endif
372#ifdef CONFIG_SERIAL_BFIN_UART1
373static struct resource bfin_uart1_resources[] = {
374 {
375 .start = UART1_THR,
376 .end = UART1_GCTL+2,
377 .flags = IORESOURCE_MEM,
378 },
379 {
380 .start = IRQ_UART1_RX,
381 .end = IRQ_UART1_RX+1,
382 .flags = IORESOURCE_IRQ,
383 },
384 {
385 .start = IRQ_UART1_ERROR,
386 .end = IRQ_UART1_ERROR,
387 .flags = IORESOURCE_IRQ,
388 },
389 {
390 .start = CH_UART1_TX,
391 .end = CH_UART1_TX,
392 .flags = IORESOURCE_DMA,
393 },
394 {
395 .start = CH_UART1_RX,
396 .end = CH_UART1_RX,
397 .flags = IORESOURCE_DMA,
398 },
399};
400
401unsigned short bfin_uart1_peripherals[] = {
402 P_UART1_TX, P_UART1_RX, 0
403};
404
405static struct platform_device bfin_uart1_device = {
406 .name = "bfin-uart",
407 .id = 1,
408 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
409 .resource = bfin_uart1_resources,
410 .dev = {
411 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
412 },
413};
414#endif
415#endif
416
417#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
418#ifdef CONFIG_BFIN_SIR0
419static struct resource bfin_sir0_resources[] = {
420 {
421 .start = 0xFFC00400,
422 .end = 0xFFC004FF,
423 .flags = IORESOURCE_MEM,
424 },
425 {
426 .start = IRQ_UART0_RX,
427 .end = IRQ_UART0_RX+1,
428 .flags = IORESOURCE_IRQ,
429 },
430 {
431 .start = CH_UART0_RX,
432 .end = CH_UART0_RX+1,
433 .flags = IORESOURCE_DMA,
434 },
435};
436
437static struct platform_device bfin_sir0_device = {
438 .name = "bfin_sir",
439 .id = 0,
440 .num_resources = ARRAY_SIZE(bfin_sir0_resources),
441 .resource = bfin_sir0_resources,
442};
443#endif
444#ifdef CONFIG_BFIN_SIR1
445static struct resource bfin_sir1_resources[] = {
446 {
447 .start = 0xFFC02000,
448 .end = 0xFFC020FF,
449 .flags = IORESOURCE_MEM,
450 },
451 {
452 .start = IRQ_UART1_RX,
453 .end = IRQ_UART1_RX+1,
454 .flags = IORESOURCE_IRQ,
455 },
456 {
457 .start = CH_UART1_RX,
458 .end = CH_UART1_RX+1,
459 .flags = IORESOURCE_DMA,
460 },
461};
462
463static struct platform_device bfin_sir1_device = {
464 .name = "bfin_sir",
465 .id = 1,
466 .num_resources = ARRAY_SIZE(bfin_sir1_resources),
467 .resource = bfin_sir1_resources,
468};
469#endif
470#endif
471
472#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
473static struct resource bfin_twi0_resource[] = {
474 [0] = {
475 .start = TWI0_REGBASE,
476 .end = TWI0_REGBASE,
477 .flags = IORESOURCE_MEM,
478 },
479 [1] = {
480 .start = IRQ_TWI,
481 .end = IRQ_TWI,
482 .flags = IORESOURCE_IRQ,
483 },
484};
485
486static struct platform_device i2c_bfin_twi_device = {
487 .name = "i2c-bfin-twi",
488 .id = 0,
489 .num_resources = ARRAY_SIZE(bfin_twi0_resource),
490 .resource = bfin_twi0_resource,
491};
492#endif
493
494static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
495#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
496 {
497 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
498 },
499#endif
500#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
501 {
502 I2C_BOARD_INFO("pcf8574_keypad", 0x27),
503 .irq = IRQ_PF8,
504 },
505#endif
506};
507
508#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
509#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
510static struct resource bfin_sport0_uart_resources[] = {
511 {
512 .start = SPORT0_TCR1,
513 .end = SPORT0_MRCS3+4,
514 .flags = IORESOURCE_MEM,
515 },
516 {
517 .start = IRQ_SPORT0_RX,
518 .end = IRQ_SPORT0_RX+1,
519 .flags = IORESOURCE_IRQ,
520 },
521 {
522 .start = IRQ_SPORT0_ERROR,
523 .end = IRQ_SPORT0_ERROR,
524 .flags = IORESOURCE_IRQ,
525 },
526};
527
528unsigned short bfin_sport0_peripherals[] = {
529 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
530 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
531};
532
533static struct platform_device bfin_sport0_uart_device = {
534 .name = "bfin-sport-uart",
535 .id = 0,
536 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
537 .resource = bfin_sport0_uart_resources,
538 .dev = {
539 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
540 },
541};
542#endif
543#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
544static struct resource bfin_sport1_uart_resources[] = {
545 {
546 .start = SPORT1_TCR1,
547 .end = SPORT1_MRCS3+4,
548 .flags = IORESOURCE_MEM,
549 },
550 {
551 .start = IRQ_SPORT1_RX,
552 .end = IRQ_SPORT1_RX+1,
553 .flags = IORESOURCE_IRQ,
554 },
555 {
556 .start = IRQ_SPORT1_ERROR,
557 .end = IRQ_SPORT1_ERROR,
558 .flags = IORESOURCE_IRQ,
559 },
560};
561
562unsigned short bfin_sport1_peripherals[] = {
563 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
564 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
565};
566
567static struct platform_device bfin_sport1_uart_device = {
568 .name = "bfin-sport-uart",
569 .id = 1,
570 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
571 .resource = bfin_sport1_uart_resources,
572 .dev = {
573 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
574 },
575};
576#endif
577#endif
578
579#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
580#include <linux/input.h>
581#include <linux/gpio_keys.h>
582
583static struct gpio_keys_button bfin_gpio_keys_table[] = {
584 {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
585 {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"},
586};
587
588static struct gpio_keys_platform_data bfin_gpio_keys_data = {
589 .buttons = bfin_gpio_keys_table,
590 .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
591};
592
593static struct platform_device bfin_device_gpiokeys = {
594 .name = "gpio-keys",
595 .dev = {
596 .platform_data = &bfin_gpio_keys_data,
597 },
598};
599#endif
600
601#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
602
603static struct bfin_sd_host bfin_sdh_data = {
604 .dma_chan = CH_RSI,
605 .irq_int0 = IRQ_RSI_INT0,
606 .pin_req = {P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3, P_RSI_CMD, P_RSI_CLK, 0},
607};
608
609static struct platform_device bf51x_sdh_device = {
610 .name = "bfin-sdh",
611 .id = 0,
612 .dev = {
613 .platform_data = &bfin_sdh_data,
614 },
615};
616#endif
617
618static const unsigned int cclk_vlev_datasheet[] =
619{
620 VRPAIR(VLEV_100, 400000000),
621 VRPAIR(VLEV_105, 426000000),
622 VRPAIR(VLEV_110, 500000000),
623 VRPAIR(VLEV_115, 533000000),
624 VRPAIR(VLEV_120, 600000000),
625};
626
627static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
628 .tuple_tab = cclk_vlev_datasheet,
629 .tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
630 .vr_settling_time = 25 /* us */,
631};
632
633static struct platform_device bfin_dpmc = {
634 .name = "bfin dpmc",
635 .dev = {
636 .platform_data = &bfin_dmpc_vreg_data,
637 },
638};
639
640static struct platform_device *tcm_devices[] __initdata = {
641
642 &bfin_dpmc,
643
644#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
645 &rtc_device,
646#endif
647
648#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
649 &bfin_mii_bus,
650 &bfin_mac_device,
651#endif
652
653#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
654 &bfin_spi0_device,
655 &bfin_spi1_device,
656#endif
657
658#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
659#ifdef CONFIG_SERIAL_BFIN_UART0
660 &bfin_uart0_device,
661#endif
662#ifdef CONFIG_SERIAL_BFIN_UART1
663 &bfin_uart1_device,
664#endif
665#endif
666
667#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
668#ifdef CONFIG_BFIN_SIR0
669 &bfin_sir0_device,
670#endif
671#ifdef CONFIG_BFIN_SIR1
672 &bfin_sir1_device,
673#endif
674#endif
675
676#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
677 &i2c_bfin_twi_device,
678#endif
679
680#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
681#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
682 &bfin_sport0_uart_device,
683#endif
684#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
685 &bfin_sport1_uart_device,
686#endif
687#endif
688
689#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
690 &bfin_device_gpiokeys,
691#endif
692
693#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
694 &bf51x_sdh_device,
695#endif
696
697#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
698 &tcm_flash_device,
699#endif
700};
701
702static int __init tcm_init(void)
703{
704 printk(KERN_INFO "%s(): registering device resources\n", __func__);
705 i2c_register_board_info(0, bfin_i2c_board_info,
706 ARRAY_SIZE(bfin_i2c_board_info));
707 platform_add_devices(tcm_devices, ARRAY_SIZE(tcm_devices));
708 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
709 return 0;
710}
711
712arch_initcall(tcm_init);
713
714static struct platform_device *tcm_early_devices[] __initdata = {
715#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
716#ifdef CONFIG_SERIAL_BFIN_UART0
717 &bfin_uart0_device,
718#endif
719#ifdef CONFIG_SERIAL_BFIN_UART1
720 &bfin_uart1_device,
721#endif
722#endif
723
724#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
725#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
726 &bfin_sport0_uart_device,
727#endif
728#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
729 &bfin_sport1_uart_device,
730#endif
731#endif
732};
733
734void __init native_machine_early_platform_add_devices(void)
735{
736 printk(KERN_INFO "register early platform devices\n");
737 early_platform_add_devices(tcm_early_devices,
738 ARRAY_SIZE(tcm_early_devices));
739}
740
741void native_machine_restart(char *cmd)
742{
743 /* workaround reboot hang when booting from SPI */
744 if ((bfin_read_SYSCR() & 0x7) == 0x3)
745 bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
746}
747
748void bfin_get_ether_addr(char *addr)
749{
750 random_ether_addr(addr);
751 printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
752}
753EXPORT_SYMBOL(bfin_get_ether_addr);
diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h
index 14e52ec7afa5..435e76e31aaa 100644
--- a/arch/blackfin/mach-bf518/include/mach/irq.h
+++ b/arch/blackfin/mach-bf518/include/mach/irq.h
@@ -151,7 +151,17 @@
151 151
152#define GPIO_IRQ_BASE IRQ_PF0 152#define GPIO_IRQ_BASE IRQ_PF0
153 153
154#define NR_IRQS (IRQ_PH15 + 1) 154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
155 165
156#define IVG7 7 166#define IVG7 7
157#define IVG8 8 167#define IVG8 8
diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h
index 3c6777cb3532..073b5d73d391 100644
--- a/arch/blackfin/mach-bf518/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h
@@ -41,7 +41,7 @@
41#define L1_DATA_A_START 0xFF800000 41#define L1_DATA_A_START 0xFF800000
42#define L1_DATA_B_START 0xFF900000 42#define L1_DATA_B_START 0xFF900000
43 43
44#define L1_CODE_LENGTH 0xC000 44#define L1_CODE_LENGTH 0x8000
45 45
46#ifdef CONFIG_BFIN_DCACHE 46#ifdef CONFIG_BFIN_DCACHE
47 47
diff --git a/arch/blackfin/mach-bf527/boards/Kconfig b/arch/blackfin/mach-bf527/boards/Kconfig
index df224d04e167..b14c28810a44 100644
--- a/arch/blackfin/mach-bf527/boards/Kconfig
+++ b/arch/blackfin/mach-bf527/boards/Kconfig
@@ -9,6 +9,11 @@ config BFIN527_EZKIT
9 help 9 help
10 BF527-EZKIT-LITE board support. 10 BF527-EZKIT-LITE board support.
11 11
12config BFIN527_EZKIT_V2
13 bool "BF527-EZKIT-V2"
14 help
15 BF527-EZKIT-LITE V2.1+ board support.
16
12config BFIN527_BLUETECHNIX_CM 17config BFIN527_BLUETECHNIX_CM
13 bool "Bluetechnix CM-BF527" 18 bool "Bluetechnix CM-BF527"
14 help 19 help
diff --git a/arch/blackfin/mach-bf527/boards/Makefile b/arch/blackfin/mach-bf527/boards/Makefile
index eb6ed3362f9f..51a5817c4a90 100644
--- a/arch/blackfin/mach-bf527/boards/Makefile
+++ b/arch/blackfin/mach-bf527/boards/Makefile
@@ -3,5 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_BFIN527_EZKIT) += ezkit.o 5obj-$(CONFIG_BFIN527_EZKIT) += ezkit.o
6obj-$(CONFIG_BFIN527_EZKIT_V2) += ezkit.o
6obj-$(CONFIG_BFIN527_BLUETECHNIX_CM) += cm_bf527.o 7obj-$(CONFIG_BFIN527_BLUETECHNIX_CM) += cm_bf527.o
7obj-$(CONFIG_BFIN526_EZBRD) += ezbrd.o 8obj-$(CONFIG_BFIN526_EZBRD) += ezbrd.o
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 7ab0800e2914..ebe76d1e874a 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -18,7 +18,6 @@
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/usb/sl811.h>
22#include <linux/usb/musb.h> 21#include <linux/usb/musb.h>
23#include <asm/dma.h> 22#include <asm/dma.h>
24#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
@@ -270,50 +269,6 @@ static struct platform_device dm9000_device = {
270}; 269};
271#endif 270#endif
272 271
273#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
274static struct resource sl811_hcd_resources[] = {
275 {
276 .start = 0x20340000,
277 .end = 0x20340000,
278 .flags = IORESOURCE_MEM,
279 }, {
280 .start = 0x20340004,
281 .end = 0x20340004,
282 .flags = IORESOURCE_MEM,
283 }, {
284 .start = CONFIG_USB_SL811_BFIN_IRQ,
285 .end = CONFIG_USB_SL811_BFIN_IRQ,
286 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
287 },
288};
289
290#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
291void sl811_port_power(struct device *dev, int is_on)
292{
293 gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
294 gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
295}
296#endif
297
298static struct sl811_platform_data sl811_priv = {
299 .potpg = 10,
300 .power = 250, /* == 500mA */
301#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
302 .port_power = &sl811_port_power,
303#endif
304};
305
306static struct platform_device sl811_hcd_device = {
307 .name = "sl811-hcd",
308 .id = 0,
309 .dev = {
310 .platform_data = &sl811_priv,
311 },
312 .num_resources = ARRAY_SIZE(sl811_hcd_resources),
313 .resource = sl811_hcd_resources,
314};
315#endif
316
317#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 272#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
318static struct platform_device bfin_mii_bus = { 273static struct platform_device bfin_mii_bus = {
319 .name = "bfin_mii_bus", 274 .name = "bfin_mii_bus",
@@ -384,8 +339,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
384}; 339};
385#endif 340#endif
386 341
387#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 342#if defined(CONFIG_SND_BLACKFIN_AD183X) \
388 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 343 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
389static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 344static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
390 .enable_dma = 0, 345 .enable_dma = 0,
391 .bits_per_word = 16, 346 .bits_per_word = 16,
@@ -462,8 +417,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
462 }, 417 },
463#endif 418#endif
464 419
465#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 420#if defined(CONFIG_SND_BLACKFIN_AD183X) \
466 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 421 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
467 { 422 {
468 .modalias = "ad1836", 423 .modalias = "ad1836",
469 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 424 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -603,30 +558,105 @@ static struct platform_device cm_flash_device = {
603#endif 558#endif
604 559
605#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 560#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
606static struct resource bfin_uart_resources[] = {
607#ifdef CONFIG_SERIAL_BFIN_UART0 561#ifdef CONFIG_SERIAL_BFIN_UART0
562static struct resource bfin_uart0_resources[] = {
608 { 563 {
609 .start = 0xFFC00400, 564 .start = UART0_THR,
610 .end = 0xFFC004FF, 565 .end = UART0_GCTL+2,
611 .flags = IORESOURCE_MEM, 566 .flags = IORESOURCE_MEM,
612 }, 567 },
568 {
569 .start = IRQ_UART0_RX,
570 .end = IRQ_UART0_RX+1,
571 .flags = IORESOURCE_IRQ,
572 },
573 {
574 .start = IRQ_UART0_ERROR,
575 .end = IRQ_UART0_ERROR,
576 .flags = IORESOURCE_IRQ,
577 },
578 {
579 .start = CH_UART0_TX,
580 .end = CH_UART0_TX,
581 .flags = IORESOURCE_DMA,
582 },
583 {
584 .start = CH_UART0_RX,
585 .end = CH_UART0_RX,
586 .flags = IORESOURCE_DMA,
587 },
588};
589
590unsigned short bfin_uart0_peripherals[] = {
591 P_UART0_TX, P_UART0_RX, 0
592};
593
594static struct platform_device bfin_uart0_device = {
595 .name = "bfin-uart",
596 .id = 0,
597 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
598 .resource = bfin_uart0_resources,
599 .dev = {
600 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
601 },
602};
613#endif 603#endif
614#ifdef CONFIG_SERIAL_BFIN_UART1 604#ifdef CONFIG_SERIAL_BFIN_UART1
605static struct resource bfin_uart1_resources[] = {
615 { 606 {
616 .start = 0xFFC02000, 607 .start = UART1_THR,
617 .end = 0xFFC020FF, 608 .end = UART1_GCTL+2,
618 .flags = IORESOURCE_MEM, 609 .flags = IORESOURCE_MEM,
619 }, 610 },
611 {
612 .start = IRQ_UART1_RX,
613 .end = IRQ_UART1_RX+1,
614 .flags = IORESOURCE_IRQ,
615 },
616 {
617 .start = IRQ_UART1_ERROR,
618 .end = IRQ_UART1_ERROR,
619 .flags = IORESOURCE_IRQ,
620 },
621 {
622 .start = CH_UART1_TX,
623 .end = CH_UART1_TX,
624 .flags = IORESOURCE_DMA,
625 },
626 {
627 .start = CH_UART1_RX,
628 .end = CH_UART1_RX,
629 .flags = IORESOURCE_DMA,
630 },
631#ifdef CONFIG_BFIN_UART1_CTSRTS
632 { /* CTS pin */
633 .start = GPIO_PF9,
634 .end = GPIO_PF9,
635 .flags = IORESOURCE_IO,
636 },
637 { /* RTS pin */
638 .start = GPIO_PF10,
639 .end = GPIO_PF10,
640 .flags = IORESOURCE_IO,
641 },
620#endif 642#endif
621}; 643};
622 644
623static struct platform_device bfin_uart_device = { 645unsigned short bfin_uart1_peripherals[] = {
646 P_UART1_TX, P_UART1_RX, 0
647};
648
649static struct platform_device bfin_uart1_device = {
624 .name = "bfin-uart", 650 .name = "bfin-uart",
625 .id = 1, 651 .id = 1,
626 .num_resources = ARRAY_SIZE(bfin_uart_resources), 652 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
627 .resource = bfin_uart_resources, 653 .resource = bfin_uart1_resources,
654 .dev = {
655 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
656 },
628}; 657};
629#endif 658#endif
659#endif
630 660
631#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 661#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
632#ifdef CONFIG_BFIN_SIR0 662#ifdef CONFIG_BFIN_SIR0
@@ -725,16 +755,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
725}; 755};
726 756
727#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 757#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
758#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
759static struct resource bfin_sport0_uart_resources[] = {
760 {
761 .start = SPORT0_TCR1,
762 .end = SPORT0_MRCS3+4,
763 .flags = IORESOURCE_MEM,
764 },
765 {
766 .start = IRQ_SPORT0_RX,
767 .end = IRQ_SPORT0_RX+1,
768 .flags = IORESOURCE_IRQ,
769 },
770 {
771 .start = IRQ_SPORT0_ERROR,
772 .end = IRQ_SPORT0_ERROR,
773 .flags = IORESOURCE_IRQ,
774 },
775};
776
777unsigned short bfin_sport0_peripherals[] = {
778 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
779 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
780};
781
728static struct platform_device bfin_sport0_uart_device = { 782static struct platform_device bfin_sport0_uart_device = {
729 .name = "bfin-sport-uart", 783 .name = "bfin-sport-uart",
730 .id = 0, 784 .id = 0,
785 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
786 .resource = bfin_sport0_uart_resources,
787 .dev = {
788 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
789 },
790};
791#endif
792#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
793static struct resource bfin_sport1_uart_resources[] = {
794 {
795 .start = SPORT1_TCR1,
796 .end = SPORT1_MRCS3+4,
797 .flags = IORESOURCE_MEM,
798 },
799 {
800 .start = IRQ_SPORT1_RX,
801 .end = IRQ_SPORT1_RX+1,
802 .flags = IORESOURCE_IRQ,
803 },
804 {
805 .start = IRQ_SPORT1_ERROR,
806 .end = IRQ_SPORT1_ERROR,
807 .flags = IORESOURCE_IRQ,
808 },
809};
810
811unsigned short bfin_sport1_peripherals[] = {
812 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
813 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
731}; 814};
732 815
733static struct platform_device bfin_sport1_uart_device = { 816static struct platform_device bfin_sport1_uart_device = {
734 .name = "bfin-sport-uart", 817 .name = "bfin-sport-uart",
735 .id = 1, 818 .id = 1,
819 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
820 .resource = bfin_sport1_uart_resources,
821 .dev = {
822 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
823 },
736}; 824};
737#endif 825#endif
826#endif
738 827
739#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 828#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
740#include <linux/input.h> 829#include <linux/input.h>
@@ -795,10 +884,6 @@ static struct platform_device *cmbf527_devices[] __initdata = {
795 &rtc_device, 884 &rtc_device,
796#endif 885#endif
797 886
798#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
799 &sl811_hcd_device,
800#endif
801
802#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 887#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
803 &bfin_isp1760_device, 888 &bfin_isp1760_device,
804#endif 889#endif
@@ -829,7 +914,12 @@ static struct platform_device *cmbf527_devices[] __initdata = {
829#endif 914#endif
830 915
831#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 916#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
832 &bfin_uart_device, 917#ifdef CONFIG_SERIAL_BFIN_UART0
918 &bfin_uart0_device,
919#endif
920#ifdef CONFIG_SERIAL_BFIN_UART1
921 &bfin_uart1_device,
922#endif
833#endif 923#endif
834 924
835#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 925#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -846,9 +936,13 @@ static struct platform_device *cmbf527_devices[] __initdata = {
846#endif 936#endif
847 937
848#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 938#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
939#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
849 &bfin_sport0_uart_device, 940 &bfin_sport0_uart_device,
941#endif
942#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
850 &bfin_sport1_uart_device, 943 &bfin_sport1_uart_device,
851#endif 944#endif
945#endif
852 946
853#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 947#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
854 &bfin_device_gpiokeys, 948 &bfin_device_gpiokeys,
@@ -871,6 +965,33 @@ static int __init cm_init(void)
871 965
872arch_initcall(cm_init); 966arch_initcall(cm_init);
873 967
968static struct platform_device *cmbf527_early_devices[] __initdata = {
969#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
970#ifdef CONFIG_SERIAL_BFIN_UART0
971 &bfin_uart0_device,
972#endif
973#ifdef CONFIG_SERIAL_BFIN_UART1
974 &bfin_uart1_device,
975#endif
976#endif
977
978#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
979#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
980 &bfin_sport0_uart_device,
981#endif
982#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
983 &bfin_sport1_uart_device,
984#endif
985#endif
986};
987
988void __init native_machine_early_platform_add_devices(void)
989{
990 printk(KERN_INFO "register early platform devices\n");
991 early_platform_add_devices(cmbf527_early_devices,
992 ARRAY_SIZE(cmbf527_early_devices));
993}
994
874void native_machine_restart(char *cmd) 995void native_machine_restart(char *cmd)
875{ 996{
876 /* workaround reboot hang when booting from SPI */ 997 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index cad23b15d83c..55069af4f67d 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -274,8 +274,8 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
274 .median = 2, /* do 8 measurements */ 274 .median = 2, /* do 8 measurements */
275 .averaging = 1, /* take the average of 4 middle samples */ 275 .averaging = 1, /* take the average of 4 middle samples */
276 .pen_down_acc_interval = 255, /* 9.4 ms */ 276 .pen_down_acc_interval = 255, /* 9.4 ms */
277 .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ 277 .gpio_export = 1, /* Export GPIO to gpiolib */
278 .gpio_default = 1, /* During initialization set GPIO = HIGH */ 278 .gpio_base = -1, /* Dynamic allocation */
279}; 279};
280#endif 280#endif
281 281
@@ -439,30 +439,105 @@ static struct platform_device bfin_spi0_device = {
439#endif /* spi master and devices */ 439#endif /* spi master and devices */
440 440
441#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 441#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
442static struct resource bfin_uart_resources[] = {
443#ifdef CONFIG_SERIAL_BFIN_UART0 442#ifdef CONFIG_SERIAL_BFIN_UART0
443static struct resource bfin_uart0_resources[] = {
444 { 444 {
445 .start = 0xFFC00400, 445 .start = UART0_THR,
446 .end = 0xFFC004FF, 446 .end = UART0_GCTL+2,
447 .flags = IORESOURCE_MEM, 447 .flags = IORESOURCE_MEM,
448 }, 448 },
449 {
450 .start = IRQ_UART0_RX,
451 .end = IRQ_UART0_RX+1,
452 .flags = IORESOURCE_IRQ,
453 },
454 {
455 .start = IRQ_UART0_ERROR,
456 .end = IRQ_UART0_ERROR,
457 .flags = IORESOURCE_IRQ,
458 },
459 {
460 .start = CH_UART0_TX,
461 .end = CH_UART0_TX,
462 .flags = IORESOURCE_DMA,
463 },
464 {
465 .start = CH_UART0_RX,
466 .end = CH_UART0_RX,
467 .flags = IORESOURCE_DMA,
468 },
469};
470
471unsigned short bfin_uart0_peripherals[] = {
472 P_UART0_TX, P_UART0_RX, 0
473};
474
475static struct platform_device bfin_uart0_device = {
476 .name = "bfin-uart",
477 .id = 0,
478 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
479 .resource = bfin_uart0_resources,
480 .dev = {
481 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
482 },
483};
449#endif 484#endif
450#ifdef CONFIG_SERIAL_BFIN_UART1 485#ifdef CONFIG_SERIAL_BFIN_UART1
486static struct resource bfin_uart1_resources[] = {
451 { 487 {
452 .start = 0xFFC02000, 488 .start = UART1_THR,
453 .end = 0xFFC020FF, 489 .end = UART1_GCTL+2,
454 .flags = IORESOURCE_MEM, 490 .flags = IORESOURCE_MEM,
455 }, 491 },
492 {
493 .start = IRQ_UART1_RX,
494 .end = IRQ_UART1_RX+1,
495 .flags = IORESOURCE_IRQ,
496 },
497 {
498 .start = IRQ_UART1_ERROR,
499 .end = IRQ_UART1_ERROR,
500 .flags = IORESOURCE_IRQ,
501 },
502 {
503 .start = CH_UART1_TX,
504 .end = CH_UART1_TX,
505 .flags = IORESOURCE_DMA,
506 },
507 {
508 .start = CH_UART1_RX,
509 .end = CH_UART1_RX,
510 .flags = IORESOURCE_DMA,
511 },
512#ifdef CONFIG_BFIN_UART1_CTSRTS
513 { /* CTS pin */
514 .start = GPIO_PG0,
515 .end = GPIO_PG0,
516 .flags = IORESOURCE_IO,
517 },
518 { /* RTS pin */
519 .start = GPIO_PF10,
520 .end = GPIO_PF10,
521 .flags = IORESOURCE_IO,
522 },
456#endif 523#endif
457}; 524};
458 525
459static struct platform_device bfin_uart_device = { 526unsigned short bfin_uart1_peripherals[] = {
527 P_UART1_TX, P_UART1_RX, 0
528};
529
530static struct platform_device bfin_uart1_device = {
460 .name = "bfin-uart", 531 .name = "bfin-uart",
461 .id = 1, 532 .id = 1,
462 .num_resources = ARRAY_SIZE(bfin_uart_resources), 533 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
463 .resource = bfin_uart_resources, 534 .resource = bfin_uart1_resources,
535 .dev = {
536 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
537 },
464}; 538};
465#endif 539#endif
540#endif
466 541
467#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 542#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
468#ifdef CONFIG_BFIN_SIR0 543#ifdef CONFIG_BFIN_SIR0
@@ -556,16 +631,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
556}; 631};
557 632
558#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 633#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
634#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
635static struct resource bfin_sport0_uart_resources[] = {
636 {
637 .start = SPORT0_TCR1,
638 .end = SPORT0_MRCS3+4,
639 .flags = IORESOURCE_MEM,
640 },
641 {
642 .start = IRQ_SPORT0_RX,
643 .end = IRQ_SPORT0_RX+1,
644 .flags = IORESOURCE_IRQ,
645 },
646 {
647 .start = IRQ_SPORT0_ERROR,
648 .end = IRQ_SPORT0_ERROR,
649 .flags = IORESOURCE_IRQ,
650 },
651};
652
653unsigned short bfin_sport0_peripherals[] = {
654 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
655 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
656};
657
559static struct platform_device bfin_sport0_uart_device = { 658static struct platform_device bfin_sport0_uart_device = {
560 .name = "bfin-sport-uart", 659 .name = "bfin-sport-uart",
561 .id = 0, 660 .id = 0,
661 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
662 .resource = bfin_sport0_uart_resources,
663 .dev = {
664 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
665 },
666};
667#endif
668#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
669static struct resource bfin_sport1_uart_resources[] = {
670 {
671 .start = SPORT1_TCR1,
672 .end = SPORT1_MRCS3+4,
673 .flags = IORESOURCE_MEM,
674 },
675 {
676 .start = IRQ_SPORT1_RX,
677 .end = IRQ_SPORT1_RX+1,
678 .flags = IORESOURCE_IRQ,
679 },
680 {
681 .start = IRQ_SPORT1_ERROR,
682 .end = IRQ_SPORT1_ERROR,
683 .flags = IORESOURCE_IRQ,
684 },
685};
686
687unsigned short bfin_sport1_peripherals[] = {
688 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
689 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
562}; 690};
563 691
564static struct platform_device bfin_sport1_uart_device = { 692static struct platform_device bfin_sport1_uart_device = {
565 .name = "bfin-sport-uart", 693 .name = "bfin-sport-uart",
566 .id = 1, 694 .id = 1,
695 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
696 .resource = bfin_sport1_uart_resources,
697 .dev = {
698 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
699 },
567}; 700};
568#endif 701#endif
702#endif
569 703
570#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 704#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
571#include <linux/input.h> 705#include <linux/input.h>
@@ -615,9 +749,10 @@ static struct platform_device bfin_dpmc = {
615#include <asm/bfin-lq035q1.h> 749#include <asm/bfin-lq035q1.h>
616 750
617static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 751static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
618 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, 752 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
619 .use_bl = 1, 753 .ppi_mode = USE_RGB565_16_BIT_PPI,
620 .gpio_bl = GPIO_PG12, 754 .use_bl = 1,
755 .gpio_bl = GPIO_PG12,
621}; 756};
622 757
623static struct resource bfin_lq035q1_resources[] = { 758static struct resource bfin_lq035q1_resources[] = {
@@ -665,7 +800,12 @@ static struct platform_device *stamp_devices[] __initdata = {
665#endif 800#endif
666 801
667#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 802#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
668 &bfin_uart_device, 803#ifdef CONFIG_SERIAL_BFIN_UART0
804 &bfin_uart0_device,
805#endif
806#ifdef CONFIG_SERIAL_BFIN_UART1
807 &bfin_uart1_device,
808#endif
669#endif 809#endif
670 810
671#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) 811#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -686,9 +826,13 @@ static struct platform_device *stamp_devices[] __initdata = {
686#endif 826#endif
687 827
688#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 828#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
829#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
689 &bfin_sport0_uart_device, 830 &bfin_sport0_uart_device,
831#endif
832#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
690 &bfin_sport1_uart_device, 833 &bfin_sport1_uart_device,
691#endif 834#endif
835#endif
692 836
693#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 837#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
694 &bfin_device_gpiokeys, 838 &bfin_device_gpiokeys,
@@ -711,6 +855,33 @@ static int __init ezbrd_init(void)
711 855
712arch_initcall(ezbrd_init); 856arch_initcall(ezbrd_init);
713 857
858static struct platform_device *ezbrd_early_devices[] __initdata = {
859#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
860#ifdef CONFIG_SERIAL_BFIN_UART0
861 &bfin_uart0_device,
862#endif
863#ifdef CONFIG_SERIAL_BFIN_UART1
864 &bfin_uart1_device,
865#endif
866#endif
867
868#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
869#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
870 &bfin_sport0_uart_device,
871#endif
872#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
873 &bfin_sport1_uart_device,
874#endif
875#endif
876};
877
878void __init native_machine_early_platform_add_devices(void)
879{
880 printk(KERN_INFO "register early platform devices\n");
881 early_platform_add_devices(ezbrd_early_devices,
882 ARRAY_SIZE(ezbrd_early_devices));
883}
884
714void native_machine_restart(char *cmd) 885void native_machine_restart(char *cmd)
715{ 886{
716 /* workaround reboot hang when booting from SPI */ 887 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 5294fdd20732..923383386aa1 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -16,8 +16,9 @@
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/usb/sl811.h>
20#include <linux/usb/musb.h> 19#include <linux/usb/musb.h>
20#include <linux/leds.h>
21#include <linux/input.h>
21#include <asm/dma.h> 22#include <asm/dma.h>
22#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
23#include <asm/reboot.h> 24#include <asm/reboot.h>
@@ -29,7 +30,11 @@
29/* 30/*
30 * Name the Board for the /proc/cpuinfo 31 * Name the Board for the /proc/cpuinfo
31 */ 32 */
33#ifdef CONFIG_BFIN527_EZKIT_V2
34const char bfin_board_name[] = "ADI BF527-EZKIT V2";
35#else
32const char bfin_board_name[] = "ADI BF527-EZKIT"; 36const char bfin_board_name[] = "ADI BF527-EZKIT";
37#endif
33 38
34/* 39/*
35 * Driver needs to know address, irq and flag pin. 40 * Driver needs to know address, irq and flag pin.
@@ -143,6 +148,33 @@ static struct platform_device bf52x_t350mcqb_device = {
143}; 148};
144#endif 149#endif
145 150
151#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
152#include <asm/bfin-lq035q1.h>
153
154static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
155 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
156 .ppi_mode = USE_RGB565_8_BIT_PPI,
157};
158
159static struct resource bfin_lq035q1_resources[] = {
160 {
161 .start = IRQ_PPI_ERROR,
162 .end = IRQ_PPI_ERROR,
163 .flags = IORESOURCE_IRQ,
164 },
165};
166
167static struct platform_device bfin_lq035q1_device = {
168 .name = "bfin-lq035q1",
169 .id = -1,
170 .num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
171 .resource = bfin_lq035q1_resources,
172 .dev = {
173 .platform_data = &bfin_lq035q1_data,
174 },
175};
176#endif
177
146#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 178#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
147static struct mtd_partition ezkit_partitions[] = { 179static struct mtd_partition ezkit_partitions[] = {
148 { 180 {
@@ -326,50 +358,6 @@ static struct platform_device dm9000_device = {
326}; 358};
327#endif 359#endif
328 360
329#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
330static struct resource sl811_hcd_resources[] = {
331 {
332 .start = 0x20340000,
333 .end = 0x20340000,
334 .flags = IORESOURCE_MEM,
335 }, {
336 .start = 0x20340004,
337 .end = 0x20340004,
338 .flags = IORESOURCE_MEM,
339 }, {
340 .start = CONFIG_USB_SL811_BFIN_IRQ,
341 .end = CONFIG_USB_SL811_BFIN_IRQ,
342 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
343 },
344};
345
346#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
347void sl811_port_power(struct device *dev, int is_on)
348{
349 gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
350 gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
351}
352#endif
353
354static struct sl811_platform_data sl811_priv = {
355 .potpg = 10,
356 .power = 250, /* == 500mA */
357#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
358 .port_power = &sl811_port_power,
359#endif
360};
361
362static struct platform_device sl811_hcd_device = {
363 .name = "sl811-hcd",
364 .id = 0,
365 .dev = {
366 .platform_data = &sl811_priv,
367 },
368 .num_resources = ARRAY_SIZE(sl811_hcd_resources),
369 .resource = sl811_hcd_resources,
370};
371#endif
372
373#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 361#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
374static struct platform_device bfin_mii_bus = { 362static struct platform_device bfin_mii_bus = {
375 .name = "bfin_mii_bus", 363 .name = "bfin_mii_bus",
@@ -440,8 +428,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
440}; 428};
441#endif 429#endif
442 430
443#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 431#if defined(CONFIG_SND_BLACKFIN_AD183X) \
444 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 432 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
445static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 433static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
446 .enable_dma = 0, 434 .enable_dma = 0,
447 .bits_per_word = 16, 435 .bits_per_word = 16,
@@ -488,8 +476,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
488 .median = 2, /* do 8 measurements */ 476 .median = 2, /* do 8 measurements */
489 .averaging = 1, /* take the average of 4 middle samples */ 477 .averaging = 1, /* take the average of 4 middle samples */
490 .pen_down_acc_interval = 255, /* 9.4 ms */ 478 .pen_down_acc_interval = 255, /* 9.4 ms */
491 .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ 479 .gpio_export = 0, /* Export GPIO to gpiolib */
492 .gpio_default = 1, /* During initialization set GPIO = HIGH */
493}; 480};
494#endif 481#endif
495 482
@@ -500,14 +487,6 @@ static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
500}; 487};
501#endif 488#endif
502 489
503#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
504 && defined(CONFIG_SND_SOC_WM8731_SPI)
505static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
506 .enable_dma = 0,
507 .bits_per_word = 16,
508};
509#endif
510
511#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 490#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
512static struct bfin5xx_spi_chip spidev_chip_info = { 491static struct bfin5xx_spi_chip spidev_chip_info = {
513 .enable_dma = 0, 492 .enable_dma = 0,
@@ -515,6 +494,29 @@ static struct bfin5xx_spi_chip spidev_chip_info = {
515}; 494};
516#endif 495#endif
517 496
497#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
498static struct platform_device bfin_i2s = {
499 .name = "bfin-i2s",
500 .id = CONFIG_SND_BF5XX_SPORT_NUM,
501 /* TODO: add platform data here */
502};
503#endif
504
505#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
506static struct platform_device bfin_tdm = {
507 .name = "bfin-tdm",
508 .id = CONFIG_SND_BF5XX_SPORT_NUM,
509 /* TODO: add platform data here */
510};
511#endif
512
513#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
514static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
515 .enable_dma = 0,
516 .bits_per_word = 8,
517};
518#endif
519
518static struct spi_board_info bfin_spi_board_info[] __initdata = { 520static struct spi_board_info bfin_spi_board_info[] __initdata = {
519#if defined(CONFIG_MTD_M25P80) \ 521#if defined(CONFIG_MTD_M25P80) \
520 || defined(CONFIG_MTD_M25P80_MODULE) 522 || defined(CONFIG_MTD_M25P80_MODULE)
@@ -542,8 +544,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
542 }, 544 },
543#endif 545#endif
544 546
545#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 547#if defined(CONFIG_SND_BLACKFIN_AD183X) \
546 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 548 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
547 { 549 {
548 .modalias = "ad1836", 550 .modalias = "ad1836",
549 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 551 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -586,17 +588,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
586 .mode = SPI_CPHA | SPI_CPOL, 588 .mode = SPI_CPHA | SPI_CPOL,
587 }, 589 },
588#endif 590#endif
589#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
590 && defined(CONFIG_SND_SOC_WM8731_SPI)
591 {
592 .modalias = "wm8731",
593 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
594 .bus_num = 0,
595 .chip_select = 5,
596 .controller_data = &spi_wm8731_chip_info,
597 .mode = SPI_MODE_0,
598 },
599#endif
600#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) 591#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
601 { 592 {
602 .modalias = "spidev", 593 .modalias = "spidev",
@@ -606,6 +597,16 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
606 .controller_data = &spidev_chip_info, 597 .controller_data = &spidev_chip_info,
607 }, 598 },
608#endif 599#endif
600#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
601 {
602 .modalias = "bfin-lq035q1-spi",
603 .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
604 .bus_num = 0,
605 .chip_select = 7,
606 .controller_data = &lq035q1_spi_chip_info,
607 .mode = SPI_CPHA | SPI_CPOL,
608 },
609#endif
609}; 610};
610 611
611#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 612#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
@@ -647,30 +648,105 @@ static struct platform_device bfin_spi0_device = {
647#endif /* spi master and devices */ 648#endif /* spi master and devices */
648 649
649#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 650#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
650static struct resource bfin_uart_resources[] = {
651#ifdef CONFIG_SERIAL_BFIN_UART0 651#ifdef CONFIG_SERIAL_BFIN_UART0
652static struct resource bfin_uart0_resources[] = {
652 { 653 {
653 .start = 0xFFC00400, 654 .start = UART0_THR,
654 .end = 0xFFC004FF, 655 .end = UART0_GCTL+2,
655 .flags = IORESOURCE_MEM, 656 .flags = IORESOURCE_MEM,
656 }, 657 },
658 {
659 .start = IRQ_UART0_RX,
660 .end = IRQ_UART0_RX+1,
661 .flags = IORESOURCE_IRQ,
662 },
663 {
664 .start = IRQ_UART0_ERROR,
665 .end = IRQ_UART0_ERROR,
666 .flags = IORESOURCE_IRQ,
667 },
668 {
669 .start = CH_UART0_TX,
670 .end = CH_UART0_TX,
671 .flags = IORESOURCE_DMA,
672 },
673 {
674 .start = CH_UART0_RX,
675 .end = CH_UART0_RX,
676 .flags = IORESOURCE_DMA,
677 },
678};
679
680unsigned short bfin_uart0_peripherals[] = {
681 P_UART0_TX, P_UART0_RX, 0
682};
683
684static struct platform_device bfin_uart0_device = {
685 .name = "bfin-uart",
686 .id = 0,
687 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
688 .resource = bfin_uart0_resources,
689 .dev = {
690 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
691 },
692};
657#endif 693#endif
658#ifdef CONFIG_SERIAL_BFIN_UART1 694#ifdef CONFIG_SERIAL_BFIN_UART1
695static struct resource bfin_uart1_resources[] = {
659 { 696 {
660 .start = 0xFFC02000, 697 .start = UART1_THR,
661 .end = 0xFFC020FF, 698 .end = UART1_GCTL+2,
662 .flags = IORESOURCE_MEM, 699 .flags = IORESOURCE_MEM,
663 }, 700 },
701 {
702 .start = IRQ_UART1_RX,
703 .end = IRQ_UART1_RX+1,
704 .flags = IORESOURCE_IRQ,
705 },
706 {
707 .start = IRQ_UART1_ERROR,
708 .end = IRQ_UART1_ERROR,
709 .flags = IORESOURCE_IRQ,
710 },
711 {
712 .start = CH_UART1_TX,
713 .end = CH_UART1_TX,
714 .flags = IORESOURCE_DMA,
715 },
716 {
717 .start = CH_UART1_RX,
718 .end = CH_UART1_RX,
719 .flags = IORESOURCE_DMA,
720 },
721#ifdef CONFIG_BFIN_UART1_CTSRTS
722 { /* CTS pin */
723 .start = GPIO_PF9,
724 .end = GPIO_PF9,
725 .flags = IORESOURCE_IO,
726 },
727 { /* RTS pin */
728 .start = GPIO_PF10,
729 .end = GPIO_PF10,
730 .flags = IORESOURCE_IO,
731 },
664#endif 732#endif
665}; 733};
666 734
667static struct platform_device bfin_uart_device = { 735unsigned short bfin_uart1_peripherals[] = {
736 P_UART1_TX, P_UART1_RX, 0
737};
738
739static struct platform_device bfin_uart1_device = {
668 .name = "bfin-uart", 740 .name = "bfin-uart",
669 .id = 1, 741 .id = 1,
670 .num_resources = ARRAY_SIZE(bfin_uart_resources), 742 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
671 .resource = bfin_uart_resources, 743 .resource = bfin_uart1_resources,
744 .dev = {
745 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
746 },
672}; 747};
673#endif 748#endif
749#endif
674 750
675#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 751#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
676#ifdef CONFIG_BFIN_SIR0 752#ifdef CONFIG_BFIN_SIR0
@@ -749,6 +825,71 @@ static struct platform_device i2c_bfin_twi_device = {
749}; 825};
750#endif 826#endif
751 827
828#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
829#include <linux/mfd/adp5520.h>
830
831 /*
832 * ADP5520/5501 LEDs Data
833 */
834
835static struct led_info adp5520_leds[] = {
836 {
837 .name = "adp5520-led1",
838 .default_trigger = "none",
839 .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms,
840 },
841};
842
843static struct adp5520_leds_platform_data adp5520_leds_data = {
844 .num_leds = ARRAY_SIZE(adp5520_leds),
845 .leds = adp5520_leds,
846 .fade_in = ADP5520_FADE_T_600ms,
847 .fade_out = ADP5520_FADE_T_600ms,
848 .led_on_time = ADP5520_LED_ONT_600ms,
849};
850
851 /*
852 * ADP5520 Keypad Data
853 */
854
855static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = {
856 [ADP5520_KEY(3, 3)] = KEY_1,
857 [ADP5520_KEY(2, 3)] = KEY_2,
858 [ADP5520_KEY(1, 3)] = KEY_3,
859 [ADP5520_KEY(0, 3)] = KEY_UP,
860 [ADP5520_KEY(3, 2)] = KEY_4,
861 [ADP5520_KEY(2, 2)] = KEY_5,
862 [ADP5520_KEY(1, 2)] = KEY_6,
863 [ADP5520_KEY(0, 2)] = KEY_DOWN,
864 [ADP5520_KEY(3, 1)] = KEY_7,
865 [ADP5520_KEY(2, 1)] = KEY_8,
866 [ADP5520_KEY(1, 1)] = KEY_9,
867 [ADP5520_KEY(0, 1)] = KEY_DOT,
868 [ADP5520_KEY(3, 0)] = KEY_BACKSPACE,
869 [ADP5520_KEY(2, 0)] = KEY_0,
870 [ADP5520_KEY(1, 0)] = KEY_HELP,
871 [ADP5520_KEY(0, 0)] = KEY_ENTER,
872};
873
874static struct adp5520_keys_platform_data adp5520_keys_data = {
875 .rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0,
876 .cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0,
877 .keymap = adp5520_keymap,
878 .keymapsize = ARRAY_SIZE(adp5520_keymap),
879 .repeat = 0,
880};
881
882 /*
883 * ADP5520/5501 Multifuction Device Init Data
884 */
885
886static struct adp5520_platform_data adp5520_pdev_data = {
887 .leds = &adp5520_leds_data,
888 .keys = &adp5520_keys_data,
889};
890
891#endif
892
752static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 893static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
753#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 894#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
754 { 895 {
@@ -766,22 +907,99 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
766 I2C_BOARD_INFO("bfin-adv7393", 0x2B), 907 I2C_BOARD_INFO("bfin-adv7393", 0x2B),
767 }, 908 },
768#endif 909#endif
910#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
911 {
912 I2C_BOARD_INFO("ad7879", 0x2C),
913 .irq = IRQ_PF8,
914 .platform_data = (void *)&bfin_ad7879_ts_info,
915 },
916#endif
917#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
918 {
919 I2C_BOARD_INFO("pmic-adp5520", 0x32),
920 .irq = IRQ_PF9,
921 .platform_data = (void *)&adp5520_pdev_data,
922 },
923#endif
924#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
925 {
926 I2C_BOARD_INFO("ssm2602", 0x1b),
927 },
928#endif
769}; 929};
770 930
771#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 931#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
932#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
933static struct resource bfin_sport0_uart_resources[] = {
934 {
935 .start = SPORT0_TCR1,
936 .end = SPORT0_MRCS3+4,
937 .flags = IORESOURCE_MEM,
938 },
939 {
940 .start = IRQ_SPORT0_RX,
941 .end = IRQ_SPORT0_RX+1,
942 .flags = IORESOURCE_IRQ,
943 },
944 {
945 .start = IRQ_SPORT0_ERROR,
946 .end = IRQ_SPORT0_ERROR,
947 .flags = IORESOURCE_IRQ,
948 },
949};
950
951unsigned short bfin_sport0_peripherals[] = {
952 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
953 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
954};
955
772static struct platform_device bfin_sport0_uart_device = { 956static struct platform_device bfin_sport0_uart_device = {
773 .name = "bfin-sport-uart", 957 .name = "bfin-sport-uart",
774 .id = 0, 958 .id = 0,
959 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
960 .resource = bfin_sport0_uart_resources,
961 .dev = {
962 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
963 },
964};
965#endif
966#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
967static struct resource bfin_sport1_uart_resources[] = {
968 {
969 .start = SPORT1_TCR1,
970 .end = SPORT1_MRCS3+4,
971 .flags = IORESOURCE_MEM,
972 },
973 {
974 .start = IRQ_SPORT1_RX,
975 .end = IRQ_SPORT1_RX+1,
976 .flags = IORESOURCE_IRQ,
977 },
978 {
979 .start = IRQ_SPORT1_ERROR,
980 .end = IRQ_SPORT1_ERROR,
981 .flags = IORESOURCE_IRQ,
982 },
983};
984
985unsigned short bfin_sport1_peripherals[] = {
986 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
987 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
775}; 988};
776 989
777static struct platform_device bfin_sport1_uart_device = { 990static struct platform_device bfin_sport1_uart_device = {
778 .name = "bfin-sport-uart", 991 .name = "bfin-sport-uart",
779 .id = 1, 992 .id = 1,
993 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
994 .resource = bfin_sport1_uart_resources,
995 .dev = {
996 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
997 },
780}; 998};
781#endif 999#endif
1000#endif
782 1001
783#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1002#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
784#include <linux/input.h>
785#include <linux/gpio_keys.h> 1003#include <linux/gpio_keys.h>
786 1004
787static struct gpio_keys_button bfin_gpio_keys_table[] = { 1005static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -803,7 +1021,6 @@ static struct platform_device bfin_device_gpiokeys = {
803#endif 1021#endif
804 1022
805#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) 1023#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
806#include <linux/input.h>
807#include <asm/bfin_rotary.h> 1024#include <asm/bfin_rotary.h>
808 1025
809static struct bfin_rotary_platform_data bfin_rotary_data = { 1026static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -872,10 +1089,6 @@ static struct platform_device *stamp_devices[] __initdata = {
872 &rtc_device, 1089 &rtc_device,
873#endif 1090#endif
874 1091
875#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
876 &sl811_hcd_device,
877#endif
878
879#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) 1092#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
880 &bfin_isp1760_device, 1093 &bfin_isp1760_device,
881#endif 1094#endif
@@ -909,8 +1122,17 @@ static struct platform_device *stamp_devices[] __initdata = {
909 &bf52x_t350mcqb_device, 1122 &bf52x_t350mcqb_device,
910#endif 1123#endif
911 1124
1125#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
1126 &bfin_lq035q1_device,
1127#endif
1128
912#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1129#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
913 &bfin_uart_device, 1130#ifdef CONFIG_SERIAL_BFIN_UART0
1131 &bfin_uart0_device,
1132#endif
1133#ifdef CONFIG_SERIAL_BFIN_UART1
1134 &bfin_uart1_device,
1135#endif
914#endif 1136#endif
915 1137
916#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1138#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -927,9 +1149,13 @@ static struct platform_device *stamp_devices[] __initdata = {
927#endif 1149#endif
928 1150
929#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1151#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
1152#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
930 &bfin_sport0_uart_device, 1153 &bfin_sport0_uart_device,
1154#endif
1155#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
931 &bfin_sport1_uart_device, 1156 &bfin_sport1_uart_device,
932#endif 1157#endif
1158#endif
933 1159
934#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 1160#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
935 &bfin_device_gpiokeys, 1161 &bfin_device_gpiokeys,
@@ -942,6 +1168,14 @@ static struct platform_device *stamp_devices[] __initdata = {
942#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 1168#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
943 &ezkit_flash_device, 1169 &ezkit_flash_device,
944#endif 1170#endif
1171
1172#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1173 &bfin_i2s,
1174#endif
1175
1176#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1177 &bfin_tdm,
1178#endif
945}; 1179};
946 1180
947static int __init ezkit_init(void) 1181static int __init ezkit_init(void)
@@ -956,6 +1190,33 @@ static int __init ezkit_init(void)
956 1190
957arch_initcall(ezkit_init); 1191arch_initcall(ezkit_init);
958 1192
1193static struct platform_device *ezkit_early_devices[] __initdata = {
1194#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
1195#ifdef CONFIG_SERIAL_BFIN_UART0
1196 &bfin_uart0_device,
1197#endif
1198#ifdef CONFIG_SERIAL_BFIN_UART1
1199 &bfin_uart1_device,
1200#endif
1201#endif
1202
1203#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
1204#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1205 &bfin_sport0_uart_device,
1206#endif
1207#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1208 &bfin_sport1_uart_device,
1209#endif
1210#endif
1211};
1212
1213void __init native_machine_early_platform_add_devices(void)
1214{
1215 printk(KERN_INFO "register early platform devices\n");
1216 early_platform_add_devices(ezkit_early_devices,
1217 ARRAY_SIZE(ezkit_early_devices));
1218}
1219
959void native_machine_restart(char *cmd) 1220void native_machine_restart(char *cmd)
960{ 1221{
961 /* workaround reboot hang when booting from SPI */ 1222 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h
index aa6579a64a2f..704d9253e41d 100644
--- a/arch/blackfin/mach-bf527/include/mach/irq.h
+++ b/arch/blackfin/mach-bf527/include/mach/irq.h
@@ -151,7 +151,17 @@
151 151
152#define GPIO_IRQ_BASE IRQ_PF0 152#define GPIO_IRQ_BASE IRQ_PF0
153 153
154#define NR_IRQS (IRQ_PH15+1) 154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
155 165
156#define IVG7 7 166#define IVG7 7
157#define IVG8 8 167#define IVG8 8
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 4adceb0bdb6d..175371af0692 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -171,7 +171,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
171}; 171};
172#endif 172#endif
173 173
174#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 174#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
175static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 175static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
176 .enable_dma = 0, 176 .enable_dma = 0,
177 .bits_per_word = 16, 177 .bits_per_word = 16,
@@ -206,7 +206,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
206 }, 206 },
207#endif 207#endif
208 208
209#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 209#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
210 { 210 {
211 .modalias = "ad1836", 211 .modalias = "ad1836",
212 .max_speed_hz = 16, 212 .max_speed_hz = 16,
@@ -257,21 +257,50 @@ static struct platform_device bfin_spi0_device = {
257#endif /* spi master and devices */ 257#endif /* spi master and devices */
258 258
259#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 259#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
260static struct resource bfin_uart_resources[] = { 260#ifdef CONFIG_SERIAL_BFIN_UART0
261static struct resource bfin_uart0_resources[] = {
261 { 262 {
262 .start = 0xFFC00400, 263 .start = BFIN_UART_THR,
263 .end = 0xFFC004FF, 264 .end = BFIN_UART_GCTL+2,
264 .flags = IORESOURCE_MEM, 265 .flags = IORESOURCE_MEM,
265 }, 266 },
267 {
268 .start = IRQ_UART0_RX,
269 .end = IRQ_UART0_RX + 1,
270 .flags = IORESOURCE_IRQ,
271 },
272 {
273 .start = IRQ_UART0_ERROR,
274 .end = IRQ_UART0_ERROR,
275 .flags = IORESOURCE_IRQ,
276 },
277 {
278 .start = CH_UART0_TX,
279 .end = CH_UART0_TX,
280 .flags = IORESOURCE_DMA,
281 },
282 {
283 .start = CH_UART0_RX,
284 .end = CH_UART0_RX,
285 .flags = IORESOURCE_DMA,
286 },
287};
288
289unsigned short bfin_uart0_peripherals[] = {
290 P_UART0_TX, P_UART0_RX, 0
266}; 291};
267 292
268static struct platform_device bfin_uart_device = { 293static struct platform_device bfin_uart0_device = {
269 .name = "bfin-uart", 294 .name = "bfin-uart",
270 .id = 1, 295 .id = 0,
271 .num_resources = ARRAY_SIZE(bfin_uart_resources), 296 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
272 .resource = bfin_uart_resources, 297 .resource = bfin_uart0_resources,
298 .dev = {
299 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
300 },
273}; 301};
274#endif 302#endif
303#endif
275 304
276#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 305#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
277#ifdef CONFIG_BFIN_SIR0 306#ifdef CONFIG_BFIN_SIR0
@@ -394,7 +423,9 @@ static struct platform_device *h8606_devices[] __initdata = {
394#endif 423#endif
395 424
396#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 425#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
397 &bfin_uart_device, 426#ifdef CONFIG_SERIAL_BFIN_UART0
427 &bfin_uart0_device,
428#endif
398#endif 429#endif
399 430
400#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 431#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
@@ -424,3 +455,18 @@ static int __init H8606_init(void)
424} 455}
425 456
426arch_initcall(H8606_init); 457arch_initcall(H8606_init);
458
459static struct platform_device *H8606_early_devices[] __initdata = {
460#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
461#ifdef CONFIG_SERIAL_BFIN_UART0
462 &bfin_uart0_device,
463#endif
464#endif
465};
466
467void __init native_machine_early_platform_add_devices(void)
468{
469 printk(KERN_INFO "register early platform devices\n");
470 early_platform_add_devices(H8606_early_devices,
471 ARRAY_SIZE(H8606_early_devices));
472}
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index b580884848d4..842b4fa76ea9 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -195,21 +195,50 @@ static struct platform_device bfin_spi0_device = {
195#endif /* spi master and devices */ 195#endif /* spi master and devices */
196 196
197#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 197#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
198static struct resource bfin_uart_resources[] = { 198#ifdef CONFIG_SERIAL_BFIN_UART0
199static struct resource bfin_uart0_resources[] = {
199 { 200 {
200 .start = 0xFFC00400, 201 .start = BFIN_UART_THR,
201 .end = 0xFFC004FF, 202 .end = BFIN_UART_GCTL+2,
202 .flags = IORESOURCE_MEM, 203 .flags = IORESOURCE_MEM,
203 }, 204 },
205 {
206 .start = IRQ_UART0_RX,
207 .end = IRQ_UART0_RX + 1,
208 .flags = IORESOURCE_IRQ,
209 },
210 {
211 .start = IRQ_UART0_ERROR,
212 .end = IRQ_UART0_ERROR,
213 .flags = IORESOURCE_IRQ,
214 },
215 {
216 .start = CH_UART0_TX,
217 .end = CH_UART0_TX,
218 .flags = IORESOURCE_DMA,
219 },
220 {
221 .start = CH_UART0_RX,
222 .end = CH_UART0_RX,
223 .flags = IORESOURCE_DMA,
224 },
225};
226
227unsigned short bfin_uart0_peripherals[] = {
228 P_UART0_TX, P_UART0_RX, 0
204}; 229};
205 230
206static struct platform_device bfin_uart_device = { 231static struct platform_device bfin_uart0_device = {
207 .name = "bfin-uart", 232 .name = "bfin-uart",
208 .id = 1, 233 .id = 0,
209 .num_resources = ARRAY_SIZE(bfin_uart_resources), 234 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
210 .resource = bfin_uart_resources, 235 .resource = bfin_uart0_resources,
236 .dev = {
237 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
238 },
211}; 239};
212#endif 240#endif
241#endif
213 242
214#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 243#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
215#ifdef CONFIG_BFIN_SIR0 244#ifdef CONFIG_BFIN_SIR0
@@ -241,16 +270,75 @@ static struct platform_device bfin_sir0_device = {
241#endif 270#endif
242 271
243#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 272#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
273#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
274static struct resource bfin_sport0_uart_resources[] = {
275 {
276 .start = SPORT0_TCR1,
277 .end = SPORT0_MRCS3+4,
278 .flags = IORESOURCE_MEM,
279 },
280 {
281 .start = IRQ_SPORT0_RX,
282 .end = IRQ_SPORT0_RX+1,
283 .flags = IORESOURCE_IRQ,
284 },
285 {
286 .start = IRQ_SPORT0_ERROR,
287 .end = IRQ_SPORT0_ERROR,
288 .flags = IORESOURCE_IRQ,
289 },
290};
291
292unsigned short bfin_sport0_peripherals[] = {
293 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
294 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
295};
296
244static struct platform_device bfin_sport0_uart_device = { 297static struct platform_device bfin_sport0_uart_device = {
245 .name = "bfin-sport-uart", 298 .name = "bfin-sport-uart",
246 .id = 0, 299 .id = 0,
300 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
301 .resource = bfin_sport0_uart_resources,
302 .dev = {
303 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
304 },
305};
306#endif
307#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
308static struct resource bfin_sport1_uart_resources[] = {
309 {
310 .start = SPORT1_TCR1,
311 .end = SPORT1_MRCS3+4,
312 .flags = IORESOURCE_MEM,
313 },
314 {
315 .start = IRQ_SPORT1_RX,
316 .end = IRQ_SPORT1_RX+1,
317 .flags = IORESOURCE_IRQ,
318 },
319 {
320 .start = IRQ_SPORT1_ERROR,
321 .end = IRQ_SPORT1_ERROR,
322 .flags = IORESOURCE_IRQ,
323 },
324};
325
326unsigned short bfin_sport1_peripherals[] = {
327 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
328 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
247}; 329};
248 330
249static struct platform_device bfin_sport1_uart_device = { 331static struct platform_device bfin_sport1_uart_device = {
250 .name = "bfin-sport-uart", 332 .name = "bfin-sport-uart",
251 .id = 1, 333 .id = 1,
334 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
335 .resource = bfin_sport1_uart_resources,
336 .dev = {
337 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
338 },
252}; 339};
253#endif 340#endif
341#endif
254 342
255#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 343#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
256#include <linux/input.h> 344#include <linux/input.h>
@@ -344,7 +432,9 @@ static struct platform_device *stamp_devices[] __initdata = {
344#endif 432#endif
345 433
346#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 434#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
347 &bfin_uart_device, 435#ifdef CONFIG_SERIAL_BFIN_UART0
436 &bfin_uart0_device,
437#endif
348#endif 438#endif
349 439
350#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 440#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -354,9 +444,13 @@ static struct platform_device *stamp_devices[] __initdata = {
354#endif 444#endif
355 445
356#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 446#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
447#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
357 &bfin_sport0_uart_device, 448 &bfin_sport0_uart_device,
449#endif
450#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
358 &bfin_sport1_uart_device, 451 &bfin_sport1_uart_device,
359#endif 452#endif
453#endif
360 454
361#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 455#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
362 &bfin_device_gpiokeys, 456 &bfin_device_gpiokeys,
@@ -392,3 +486,27 @@ static int __init blackstamp_init(void)
392} 486}
393 487
394arch_initcall(blackstamp_init); 488arch_initcall(blackstamp_init);
489
490static struct platform_device *stamp_early_devices[] __initdata = {
491#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
492#ifdef CONFIG_SERIAL_BFIN_UART0
493 &bfin_uart0_device,
494#endif
495#endif
496
497#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
498#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
499 &bfin_sport0_uart_device,
500#endif
501#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
502 &bfin_sport1_uart_device,
503#endif
504#endif
505};
506
507void __init native_machine_early_platform_add_devices(void)
508{
509 printk(KERN_INFO "register early platform devices\n");
510 early_platform_add_devices(stamp_early_devices,
511 ARRAY_SIZE(stamp_early_devices));
512}
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 7fc3b860d4ae..fdcde61906dc 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -71,7 +71,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
71}; 71};
72#endif 72#endif
73 73
74#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 74#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
75static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 75static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
76 .enable_dma = 0, 76 .enable_dma = 0,
77 .bits_per_word = 16, 77 .bits_per_word = 16,
@@ -110,7 +110,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
110 }, 110 },
111#endif 111#endif
112 112
113#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 113#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
114 { 114 {
115 .modalias = "ad1836", 115 .modalias = "ad1836",
116 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 116 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -242,21 +242,50 @@ static struct platform_device smsc911x_device = {
242#endif 242#endif
243 243
244#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 244#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
245static struct resource bfin_uart_resources[] = { 245#ifdef CONFIG_SERIAL_BFIN_UART0
246static struct resource bfin_uart0_resources[] = {
246 { 247 {
247 .start = 0xFFC00400, 248 .start = BFIN_UART_THR,
248 .end = 0xFFC004FF, 249 .end = BFIN_UART_GCTL+2,
249 .flags = IORESOURCE_MEM, 250 .flags = IORESOURCE_MEM,
250 }, 251 },
252 {
253 .start = IRQ_UART0_RX,
254 .end = IRQ_UART0_RX + 1,
255 .flags = IORESOURCE_IRQ,
256 },
257 {
258 .start = IRQ_UART0_ERROR,
259 .end = IRQ_UART0_ERROR,
260 .flags = IORESOURCE_IRQ,
261 },
262 {
263 .start = CH_UART0_TX,
264 .end = CH_UART0_TX,
265 .flags = IORESOURCE_DMA,
266 },
267 {
268 .start = CH_UART0_RX,
269 .end = CH_UART0_RX,
270 .flags = IORESOURCE_DMA,
271 },
272};
273
274unsigned short bfin_uart0_peripherals[] = {
275 P_UART0_TX, P_UART0_RX, 0
251}; 276};
252 277
253static struct platform_device bfin_uart_device = { 278static struct platform_device bfin_uart0_device = {
254 .name = "bfin-uart", 279 .name = "bfin-uart",
255 .id = 1, 280 .id = 0,
256 .num_resources = ARRAY_SIZE(bfin_uart_resources), 281 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
257 .resource = bfin_uart_resources, 282 .resource = bfin_uart0_resources,
283 .dev = {
284 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
285 },
258}; 286};
259#endif 287#endif
288#endif
260 289
261#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 290#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
262#ifdef CONFIG_BFIN_SIR0 291#ifdef CONFIG_BFIN_SIR0
@@ -288,16 +317,75 @@ static struct platform_device bfin_sir0_device = {
288#endif 317#endif
289 318
290#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 319#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
320#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
321static struct resource bfin_sport0_uart_resources[] = {
322 {
323 .start = SPORT0_TCR1,
324 .end = SPORT0_MRCS3+4,
325 .flags = IORESOURCE_MEM,
326 },
327 {
328 .start = IRQ_SPORT0_RX,
329 .end = IRQ_SPORT0_RX+1,
330 .flags = IORESOURCE_IRQ,
331 },
332 {
333 .start = IRQ_SPORT0_ERROR,
334 .end = IRQ_SPORT0_ERROR,
335 .flags = IORESOURCE_IRQ,
336 },
337};
338
339unsigned short bfin_sport0_peripherals[] = {
340 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
341 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
342};
343
291static struct platform_device bfin_sport0_uart_device = { 344static struct platform_device bfin_sport0_uart_device = {
292 .name = "bfin-sport-uart", 345 .name = "bfin-sport-uart",
293 .id = 0, 346 .id = 0,
347 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
348 .resource = bfin_sport0_uart_resources,
349 .dev = {
350 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
351 },
352};
353#endif
354#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
355static struct resource bfin_sport1_uart_resources[] = {
356 {
357 .start = SPORT1_TCR1,
358 .end = SPORT1_MRCS3+4,
359 .flags = IORESOURCE_MEM,
360 },
361 {
362 .start = IRQ_SPORT1_RX,
363 .end = IRQ_SPORT1_RX+1,
364 .flags = IORESOURCE_IRQ,
365 },
366 {
367 .start = IRQ_SPORT1_ERROR,
368 .end = IRQ_SPORT1_ERROR,
369 .flags = IORESOURCE_IRQ,
370 },
371};
372
373unsigned short bfin_sport1_peripherals[] = {
374 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
375 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
294}; 376};
295 377
296static struct platform_device bfin_sport1_uart_device = { 378static struct platform_device bfin_sport1_uart_device = {
297 .name = "bfin-sport-uart", 379 .name = "bfin-sport-uart",
298 .id = 1, 380 .id = 1,
381 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
382 .resource = bfin_sport1_uart_resources,
383 .dev = {
384 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
385 },
299}; 386};
300#endif 387#endif
388#endif
301 389
302#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 390#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
303static struct resource isp1362_hcd_resources[] = { 391static struct resource isp1362_hcd_resources[] = {
@@ -432,7 +520,9 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
432 &bfin_dpmc, 520 &bfin_dpmc,
433 521
434#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 522#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
435 &bfin_uart_device, 523#ifdef CONFIG_SERIAL_BFIN_UART0
524 &bfin_uart0_device,
525#endif
436#endif 526#endif
437 527
438#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 528#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -442,9 +532,13 @@ static struct platform_device *cm_bf533_devices[] __initdata = {
442#endif 532#endif
443 533
444#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 534#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
535#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
445 &bfin_sport0_uart_device, 536 &bfin_sport0_uart_device,
537#endif
538#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
446 &bfin_sport1_uart_device, 539 &bfin_sport1_uart_device,
447#endif 540#endif
541#endif
448 542
449#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) 543#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
450 &rtc_device, 544 &rtc_device,
@@ -486,3 +580,27 @@ static int __init cm_bf533_init(void)
486} 580}
487 581
488arch_initcall(cm_bf533_init); 582arch_initcall(cm_bf533_init);
583
584static struct platform_device *cm_bf533_early_devices[] __initdata = {
585#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
586#ifdef CONFIG_SERIAL_BFIN_UART0
587 &bfin_uart0_device,
588#endif
589#endif
590
591#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
592#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
593 &bfin_sport0_uart_device,
594#endif
595#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
596 &bfin_sport1_uart_device,
597#endif
598#endif
599};
600
601void __init native_machine_early_platform_add_devices(void)
602{
603 printk(KERN_INFO "register early platform devices\n");
604 early_platform_add_devices(cm_bf533_early_devices,
605 ARRAY_SIZE(cm_bf533_early_devices));
606}
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index d4689dcc198e..739773cb7fc6 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -222,7 +222,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
222}; 222};
223#endif 223#endif
224 224
225#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 225#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
226static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 226static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
227 .enable_dma = 0, 227 .enable_dma = 0,
228 .bits_per_word = 16, 228 .bits_per_word = 16,
@@ -261,7 +261,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
261 }, 261 },
262#endif 262#endif
263 263
264#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 264#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
265 { 265 {
266 .modalias = "ad1836", 266 .modalias = "ad1836",
267 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 267 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -320,21 +320,50 @@ static struct platform_device bfin_spi0_device = {
320#endif /* spi master and devices */ 320#endif /* spi master and devices */
321 321
322#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 322#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
323static struct resource bfin_uart_resources[] = { 323#ifdef CONFIG_SERIAL_BFIN_UART0
324static struct resource bfin_uart0_resources[] = {
324 { 325 {
325 .start = 0xFFC00400, 326 .start = BFIN_UART_THR,
326 .end = 0xFFC004FF, 327 .end = BFIN_UART_GCTL+2,
327 .flags = IORESOURCE_MEM, 328 .flags = IORESOURCE_MEM,
328 }, 329 },
330 {
331 .start = IRQ_UART0_RX,
332 .end = IRQ_UART0_RX + 1,
333 .flags = IORESOURCE_IRQ,
334 },
335 {
336 .start = IRQ_UART0_ERROR,
337 .end = IRQ_UART0_ERROR,
338 .flags = IORESOURCE_IRQ,
339 },
340 {
341 .start = CH_UART0_TX,
342 .end = CH_UART0_TX,
343 .flags = IORESOURCE_DMA,
344 },
345 {
346 .start = CH_UART0_RX,
347 .end = CH_UART0_RX,
348 .flags = IORESOURCE_DMA,
349 },
350};
351
352unsigned short bfin_uart0_peripherals[] = {
353 P_UART0_TX, P_UART0_RX, 0
329}; 354};
330 355
331static struct platform_device bfin_uart_device = { 356static struct platform_device bfin_uart0_device = {
332 .name = "bfin-uart", 357 .name = "bfin-uart",
333 .id = 1, 358 .id = 0,
334 .num_resources = ARRAY_SIZE(bfin_uart_resources), 359 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
335 .resource = bfin_uart_resources, 360 .resource = bfin_uart0_resources,
361 .dev = {
362 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
363 },
336}; 364};
337#endif 365#endif
366#endif
338 367
339#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 368#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
340#ifdef CONFIG_BFIN_SIR0 369#ifdef CONFIG_BFIN_SIR0
@@ -444,6 +473,30 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
444#endif 473#endif
445}; 474};
446 475
476#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
477static struct platform_device bfin_i2s = {
478 .name = "bfin-i2s",
479 .id = CONFIG_SND_BF5XX_SPORT_NUM,
480 /* TODO: add platform data here */
481};
482#endif
483
484#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
485static struct platform_device bfin_tdm = {
486 .name = "bfin-tdm",
487 .id = CONFIG_SND_BF5XX_SPORT_NUM,
488 /* TODO: add platform data here */
489};
490#endif
491
492#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
493static struct platform_device bfin_ac97 = {
494 .name = "bfin-ac97",
495 .id = CONFIG_SND_BF5XX_SPORT_NUM,
496 /* TODO: add platform data here */
497};
498#endif
499
447static struct platform_device *ezkit_devices[] __initdata = { 500static struct platform_device *ezkit_devices[] __initdata = {
448 501
449 &bfin_dpmc, 502 &bfin_dpmc,
@@ -471,7 +524,9 @@ static struct platform_device *ezkit_devices[] __initdata = {
471#endif 524#endif
472 525
473#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 526#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
474 &bfin_uart_device, 527#ifdef CONFIG_SERIAL_BFIN_UART0
528 &bfin_uart0_device,
529#endif
475#endif 530#endif
476 531
477#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 532#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -487,6 +542,18 @@ static struct platform_device *ezkit_devices[] __initdata = {
487#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) 542#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
488 &i2c_gpio_device, 543 &i2c_gpio_device,
489#endif 544#endif
545
546#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
547 &bfin_i2s,
548#endif
549
550#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
551 &bfin_tdm,
552#endif
553
554#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
555 &bfin_ac97,
556#endif
490}; 557};
491 558
492static int __init ezkit_init(void) 559static int __init ezkit_init(void)
@@ -500,3 +567,18 @@ static int __init ezkit_init(void)
500} 567}
501 568
502arch_initcall(ezkit_init); 569arch_initcall(ezkit_init);
570
571static struct platform_device *ezkit_early_devices[] __initdata = {
572#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
573#ifdef CONFIG_SERIAL_BFIN_UART0
574 &bfin_uart0_device,
575#endif
576#endif
577};
578
579void __init native_machine_early_platform_add_devices(void)
580{
581 printk(KERN_INFO "register early platform devices\n");
582 early_platform_add_devices(ezkit_early_devices,
583 ARRAY_SIZE(ezkit_early_devices));
584}
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 8ec42ba35b9e..7349970db978 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -19,6 +19,7 @@
19#include <linux/usb/isp1362.h> 19#include <linux/usb/isp1362.h>
20#endif 20#endif
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <asm/dma.h>
22#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
23#include <asm/portmux.h> 24#include <asm/portmux.h>
24 25
@@ -143,21 +144,50 @@ static struct platform_device spi_bfin_master_device = {
143#endif /* spi master and devices */ 144#endif /* spi master and devices */
144 145
145#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 146#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
146static struct resource bfin_uart_resources[] = { 147#ifdef CONFIG_SERIAL_BFIN_UART0
148static struct resource bfin_uart0_resources[] = {
147 { 149 {
148 .start = 0xFFC00400, 150 .start = BFIN_UART_THR,
149 .end = 0xFFC004FF, 151 .end = BFIN_UART_GCTL+2,
150 .flags = IORESOURCE_MEM, 152 .flags = IORESOURCE_MEM,
151 }, 153 },
154 {
155 .start = IRQ_UART0_RX,
156 .end = IRQ_UART0_RX + 1,
157 .flags = IORESOURCE_IRQ,
158 },
159 {
160 .start = IRQ_UART0_ERROR,
161 .end = IRQ_UART0_ERROR,
162 .flags = IORESOURCE_IRQ,
163 },
164 {
165 .start = CH_UART0_TX,
166 .end = CH_UART0_TX,
167 .flags = IORESOURCE_DMA,
168 },
169 {
170 .start = CH_UART0_RX,
171 .end = CH_UART0_RX,
172 .flags = IORESOURCE_DMA,
173 },
174};
175
176unsigned short bfin_uart0_peripherals[] = {
177 P_UART0_TX, P_UART0_RX, 0
152}; 178};
153 179
154static struct platform_device bfin_uart_device = { 180static struct platform_device bfin_uart0_device = {
155 .name = "bfin-uart", 181 .name = "bfin-uart",
156 .id = 1, 182 .id = 0,
157 .num_resources = ARRAY_SIZE(bfin_uart_resources), 183 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
158 .resource = bfin_uart_resources, 184 .resource = bfin_uart0_resources,
185 .dev = {
186 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
187 },
159}; 188};
160#endif 189#endif
190#endif
161 191
162#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 192#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
163#ifdef CONFIG_BFIN_SIR0 193#ifdef CONFIG_BFIN_SIR0
@@ -241,7 +271,9 @@ static struct platform_device *ip0x_devices[] __initdata = {
241#endif 271#endif
242 272
243#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 273#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
244 &bfin_uart_device, 274#ifdef CONFIG_SERIAL_BFIN_UART0
275 &bfin_uart0_device,
276#endif
245#endif 277#endif
246 278
247#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 279#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -276,3 +308,18 @@ static int __init ip0x_init(void)
276} 308}
277 309
278arch_initcall(ip0x_init); 310arch_initcall(ip0x_init);
311
312static struct platform_device *ip0x_early_devices[] __initdata = {
313#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
314#ifdef CONFIG_SERIAL_BFIN_UART0
315 &bfin_uart0_device,
316#endif
317#endif
318};
319
320void __init native_machine_early_platform_add_devices(void)
321{
322 printk(KERN_INFO "register early platform devices\n");
323 early_platform_add_devices(ip0x_early_devices,
324 ARRAY_SIZE(ip0x_early_devices));
325}
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 6d68dcfa2da2..c457eaa60239 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -184,7 +184,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
184}; 184};
185#endif 185#endif
186 186
187#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 187#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
188static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 188static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
189 .enable_dma = 0, 189 .enable_dma = 0,
190 .bits_per_word = 16, 190 .bits_per_word = 16,
@@ -251,7 +251,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
251 }, 251 },
252#endif 252#endif
253 253
254#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 254#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
255 { 255 {
256 .modalias = "ad1836", 256 .modalias = "ad1836",
257 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 257 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -322,21 +322,50 @@ static struct platform_device bfin_spi0_device = {
322#endif /* spi master and devices */ 322#endif /* spi master and devices */
323 323
324#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 324#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
325static struct resource bfin_uart_resources[] = { 325#ifdef CONFIG_SERIAL_BFIN_UART0
326static struct resource bfin_uart0_resources[] = {
326 { 327 {
327 .start = 0xFFC00400, 328 .start = BFIN_UART_THR,
328 .end = 0xFFC004FF, 329 .end = BFIN_UART_GCTL+2,
329 .flags = IORESOURCE_MEM, 330 .flags = IORESOURCE_MEM,
330 }, 331 },
332 {
333 .start = IRQ_UART0_RX,
334 .end = IRQ_UART0_RX + 1,
335 .flags = IORESOURCE_IRQ,
336 },
337 {
338 .start = IRQ_UART0_ERROR,
339 .end = IRQ_UART0_ERROR,
340 .flags = IORESOURCE_IRQ,
341 },
342 {
343 .start = CH_UART0_TX,
344 .end = CH_UART0_TX,
345 .flags = IORESOURCE_DMA,
346 },
347 {
348 .start = CH_UART0_RX,
349 .end = CH_UART0_RX,
350 .flags = IORESOURCE_DMA,
351 },
331}; 352};
332 353
333static struct platform_device bfin_uart_device = { 354unsigned short bfin_uart0_peripherals[] = {
355 P_UART0_TX, P_UART0_RX, 0
356};
357
358static struct platform_device bfin_uart0_device = {
334 .name = "bfin-uart", 359 .name = "bfin-uart",
335 .id = 1, 360 .id = 0,
336 .num_resources = ARRAY_SIZE(bfin_uart_resources), 361 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
337 .resource = bfin_uart_resources, 362 .resource = bfin_uart0_resources,
363 .dev = {
364 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
365 },
338}; 366};
339#endif 367#endif
368#endif
340 369
341#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 370#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
342#ifdef CONFIG_BFIN_SIR0 371#ifdef CONFIG_BFIN_SIR0
@@ -368,16 +397,75 @@ static struct platform_device bfin_sir0_device = {
368#endif 397#endif
369 398
370#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 399#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
400#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
401static struct resource bfin_sport0_uart_resources[] = {
402 {
403 .start = SPORT0_TCR1,
404 .end = SPORT0_MRCS3+4,
405 .flags = IORESOURCE_MEM,
406 },
407 {
408 .start = IRQ_SPORT0_RX,
409 .end = IRQ_SPORT0_RX+1,
410 .flags = IORESOURCE_IRQ,
411 },
412 {
413 .start = IRQ_SPORT0_ERROR,
414 .end = IRQ_SPORT0_ERROR,
415 .flags = IORESOURCE_IRQ,
416 },
417};
418
419unsigned short bfin_sport0_peripherals[] = {
420 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
421 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
422};
423
371static struct platform_device bfin_sport0_uart_device = { 424static struct platform_device bfin_sport0_uart_device = {
372 .name = "bfin-sport-uart", 425 .name = "bfin-sport-uart",
373 .id = 0, 426 .id = 0,
427 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
428 .resource = bfin_sport0_uart_resources,
429 .dev = {
430 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
431 },
432};
433#endif
434#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
435static struct resource bfin_sport1_uart_resources[] = {
436 {
437 .start = SPORT1_TCR1,
438 .end = SPORT1_MRCS3+4,
439 .flags = IORESOURCE_MEM,
440 },
441 {
442 .start = IRQ_SPORT1_RX,
443 .end = IRQ_SPORT1_RX+1,
444 .flags = IORESOURCE_IRQ,
445 },
446 {
447 .start = IRQ_SPORT1_ERROR,
448 .end = IRQ_SPORT1_ERROR,
449 .flags = IORESOURCE_IRQ,
450 },
451};
452
453unsigned short bfin_sport1_peripherals[] = {
454 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
455 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
374}; 456};
375 457
376static struct platform_device bfin_sport1_uart_device = { 458static struct platform_device bfin_sport1_uart_device = {
377 .name = "bfin-sport-uart", 459 .name = "bfin-sport-uart",
378 .id = 1, 460 .id = 1,
461 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
462 .resource = bfin_sport1_uart_resources,
463 .dev = {
464 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
465 },
379}; 466};
380#endif 467#endif
468#endif
381 469
382#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 470#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
383#include <linux/input.h> 471#include <linux/input.h>
@@ -474,6 +562,30 @@ static struct platform_device bfin_dpmc = {
474 }, 562 },
475}; 563};
476 564
565#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
566static struct platform_device bfin_i2s = {
567 .name = "bfin-i2s",
568 .id = CONFIG_SND_BF5XX_SPORT_NUM,
569 /* TODO: add platform data here */
570};
571#endif
572
573#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
574static struct platform_device bfin_tdm = {
575 .name = "bfin-tdm",
576 .id = CONFIG_SND_BF5XX_SPORT_NUM,
577 /* TODO: add platform data here */
578};
579#endif
580
581#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
582static struct platform_device bfin_ac97 = {
583 .name = "bfin-ac97",
584 .id = CONFIG_SND_BF5XX_SPORT_NUM,
585 /* TODO: add platform data here */
586};
587#endif
588
477static struct platform_device *stamp_devices[] __initdata = { 589static struct platform_device *stamp_devices[] __initdata = {
478 590
479 &bfin_dpmc, 591 &bfin_dpmc,
@@ -495,7 +607,9 @@ static struct platform_device *stamp_devices[] __initdata = {
495#endif 607#endif
496 608
497#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 609#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
498 &bfin_uart_device, 610#ifdef CONFIG_SERIAL_BFIN_UART0
611 &bfin_uart0_device,
612#endif
499#endif 613#endif
500 614
501#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 615#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -505,9 +619,13 @@ static struct platform_device *stamp_devices[] __initdata = {
505#endif 619#endif
506 620
507#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 621#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
622#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
508 &bfin_sport0_uart_device, 623 &bfin_sport0_uart_device,
624#endif
625#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
509 &bfin_sport1_uart_device, 626 &bfin_sport1_uart_device,
510#endif 627#endif
628#endif
511 629
512#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 630#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
513 &bfin_device_gpiokeys, 631 &bfin_device_gpiokeys,
@@ -520,6 +638,18 @@ static struct platform_device *stamp_devices[] __initdata = {
520#if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE) 638#if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE)
521 &stamp_flash_device, 639 &stamp_flash_device,
522#endif 640#endif
641
642#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
643 &bfin_i2s,
644#endif
645
646#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
647 &bfin_tdm,
648#endif
649
650#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
651 &bfin_ac97,
652#endif
523}; 653};
524 654
525static int __init stamp_init(void) 655static int __init stamp_init(void)
@@ -548,6 +678,30 @@ static int __init stamp_init(void)
548 678
549arch_initcall(stamp_init); 679arch_initcall(stamp_init);
550 680
681static struct platform_device *stamp_early_devices[] __initdata = {
682#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
683#ifdef CONFIG_SERIAL_BFIN_UART0
684 &bfin_uart0_device,
685#endif
686#endif
687
688#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
689#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
690 &bfin_sport0_uart_device,
691#endif
692#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
693 &bfin_sport1_uart_device,
694#endif
695#endif
696};
697
698void __init native_machine_early_platform_add_devices(void)
699{
700 printk(KERN_INFO "register early platform devices\n");
701 early_platform_add_devices(stamp_early_devices,
702 ARRAY_SIZE(stamp_early_devices));
703}
704
551void native_machine_restart(char *cmd) 705void native_machine_restart(char *cmd)
552{ 706{
553 /* workaround pull up on cpld / flash pin not being strong enough */ 707 /* workaround pull up on cpld / flash pin not being strong enough */
diff --git a/arch/blackfin/mach-bf533/include/mach/irq.h b/arch/blackfin/mach-bf533/include/mach/irq.h
index c31498be0bbb..1f7e9765d954 100644
--- a/arch/blackfin/mach-bf533/include/mach/irq.h
+++ b/arch/blackfin/mach-bf533/include/mach/irq.h
@@ -104,7 +104,8 @@ Core Emulation **
104 104
105#define GPIO_IRQ_BASE IRQ_PF0 105#define GPIO_IRQ_BASE IRQ_PF0
106 106
107#define NR_IRQS (IRQ_PF15+1) 107#define NR_MACH_IRQS (IRQ_PF15 + 1)
108#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
108 109
109#define IVG7 7 110#define IVG7 7
110#define IVG8 8 111#define IVG8 8
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c85f4d770535..d35fc5fe4c2b 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -73,7 +73,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
73}; 73};
74#endif 74#endif
75 75
76#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 76#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
77static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 77static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
78 .enable_dma = 0, 78 .enable_dma = 0,
79 .bits_per_word = 16, 79 .bits_per_word = 16,
@@ -112,7 +112,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
112 }, 112 },
113#endif 113#endif
114 114
115#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 115#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
116 { 116 {
117 .modalias = "ad1836", 117 .modalias = "ad1836",
118 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 118 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -329,8 +329,8 @@ static struct platform_device cm_flash_device = {
329#ifdef CONFIG_SERIAL_BFIN_UART0 329#ifdef CONFIG_SERIAL_BFIN_UART0
330static struct resource bfin_uart0_resources[] = { 330static struct resource bfin_uart0_resources[] = {
331 { 331 {
332 .start = 0xFFC00400, 332 .start = UART0_THR,
333 .end = 0xFFC004FF, 333 .end = UART0_GCTL+2,
334 .flags = IORESOURCE_MEM, 334 .flags = IORESOURCE_MEM,
335 }, 335 },
336 { 336 {
@@ -373,18 +373,25 @@ static struct resource bfin_uart0_resources[] = {
373#endif 373#endif
374}; 374};
375 375
376unsigned short bfin_uart0_peripherals[] = {
377 P_UART0_TX, P_UART0_RX, 0
378};
379
376static struct platform_device bfin_uart0_device = { 380static struct platform_device bfin_uart0_device = {
377 .name = "bfin-uart", 381 .name = "bfin-uart",
378 .id = 0, 382 .id = 0,
379 .num_resources = ARRAY_SIZE(bfin_uart0_resources), 383 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
380 .resource = bfin_uart0_resources, 384 .resource = bfin_uart0_resources,
385 .dev = {
386 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
387 },
381}; 388};
382#endif 389#endif
383#ifdef CONFIG_SERIAL_BFIN_UART1 390#ifdef CONFIG_SERIAL_BFIN_UART1
384static struct resource bfin_uart1_resources[] = { 391static struct resource bfin_uart1_resources[] = {
385 { 392 {
386 .start = 0xFFC02000, 393 .start = UART1_THR,
387 .end = 0xFFC020FF, 394 .end = UART1_GCTL+2,
388 .flags = IORESOURCE_MEM, 395 .flags = IORESOURCE_MEM,
389 }, 396 },
390 { 397 {
@@ -427,11 +434,18 @@ static struct resource bfin_uart1_resources[] = {
427#endif 434#endif
428}; 435};
429 436
437unsigned short bfin_uart1_peripherals[] = {
438 P_UART1_TX, P_UART1_RX, 0
439};
440
430static struct platform_device bfin_uart1_device = { 441static struct platform_device bfin_uart1_device = {
431 .name = "bfin-uart", 442 .name = "bfin-uart",
432 .id = 1, 443 .id = 1,
433 .num_resources = ARRAY_SIZE(bfin_uart1_resources), 444 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
434 .resource = bfin_uart1_resources, 445 .resource = bfin_uart1_resources,
446 .dev = {
447 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
448 },
435}; 449};
436#endif 450#endif
437#endif 451#endif
@@ -512,16 +526,75 @@ static struct platform_device i2c_bfin_twi_device = {
512#endif 526#endif
513 527
514#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 528#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
529#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
530static struct resource bfin_sport0_uart_resources[] = {
531 {
532 .start = SPORT0_TCR1,
533 .end = SPORT0_MRCS3+4,
534 .flags = IORESOURCE_MEM,
535 },
536 {
537 .start = IRQ_SPORT0_RX,
538 .end = IRQ_SPORT0_RX+1,
539 .flags = IORESOURCE_IRQ,
540 },
541 {
542 .start = IRQ_SPORT0_ERROR,
543 .end = IRQ_SPORT0_ERROR,
544 .flags = IORESOURCE_IRQ,
545 },
546};
547
548unsigned short bfin_sport0_peripherals[] = {
549 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
550 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
551};
552
515static struct platform_device bfin_sport0_uart_device = { 553static struct platform_device bfin_sport0_uart_device = {
516 .name = "bfin-sport-uart", 554 .name = "bfin-sport-uart",
517 .id = 0, 555 .id = 0,
556 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
557 .resource = bfin_sport0_uart_resources,
558 .dev = {
559 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
560 },
561};
562#endif
563#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
564static struct resource bfin_sport1_uart_resources[] = {
565 {
566 .start = SPORT1_TCR1,
567 .end = SPORT1_MRCS3+4,
568 .flags = IORESOURCE_MEM,
569 },
570 {
571 .start = IRQ_SPORT1_RX,
572 .end = IRQ_SPORT1_RX+1,
573 .flags = IORESOURCE_IRQ,
574 },
575 {
576 .start = IRQ_SPORT1_ERROR,
577 .end = IRQ_SPORT1_ERROR,
578 .flags = IORESOURCE_IRQ,
579 },
580};
581
582unsigned short bfin_sport1_peripherals[] = {
583 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
584 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
518}; 585};
519 586
520static struct platform_device bfin_sport1_uart_device = { 587static struct platform_device bfin_sport1_uart_device = {
521 .name = "bfin-sport-uart", 588 .name = "bfin-sport-uart",
522 .id = 1, 589 .id = 1,
590 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
591 .resource = bfin_sport1_uart_resources,
592 .dev = {
593 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
594 },
523}; 595};
524#endif 596#endif
597#endif
525 598
526#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 599#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
527static struct platform_device bfin_mii_bus = { 600static struct platform_device bfin_mii_bus = {
@@ -633,9 +706,13 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
633#endif 706#endif
634 707
635#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 708#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
709#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
636 &bfin_sport0_uart_device, 710 &bfin_sport0_uart_device,
711#endif
712#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
637 &bfin_sport1_uart_device, 713 &bfin_sport1_uart_device,
638#endif 714#endif
715#endif
639 716
640#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 717#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
641 &isp1362_hcd_device, 718 &isp1362_hcd_device,
@@ -683,6 +760,33 @@ static int __init cm_bf537e_init(void)
683 760
684arch_initcall(cm_bf537e_init); 761arch_initcall(cm_bf537e_init);
685 762
763static struct platform_device *cm_bf537e_early_devices[] __initdata = {
764#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
765#ifdef CONFIG_SERIAL_BFIN_UART0
766 &bfin_uart0_device,
767#endif
768#ifdef CONFIG_SERIAL_BFIN_UART1
769 &bfin_uart1_device,
770#endif
771#endif
772
773#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
774#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
775 &bfin_sport0_uart_device,
776#endif
777#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
778 &bfin_sport1_uart_device,
779#endif
780#endif
781};
782
783void __init native_machine_early_platform_add_devices(void)
784{
785 printk(KERN_INFO "register early platform devices\n");
786 early_platform_add_devices(cm_bf537e_early_devices,
787 ARRAY_SIZE(cm_bf537e_early_devices));
788}
789
686void bfin_get_ether_addr(char *addr) 790void bfin_get_ether_addr(char *addr)
687{ 791{
688 random_ether_addr(addr); 792 random_ether_addr(addr);
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index ea11aa81340d..d464ad5b72b2 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -74,7 +74,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
74}; 74};
75#endif 75#endif
76 76
77#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 77#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
78static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 78static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
79 .enable_dma = 0, 79 .enable_dma = 0,
80 .bits_per_word = 16, 80 .bits_per_word = 16,
@@ -113,7 +113,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
113 }, 113 },
114#endif 114#endif
115 115
116#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 116#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
117 { 117 {
118 .modalias = "ad1836", 118 .modalias = "ad1836",
119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -327,25 +327,93 @@ static struct platform_device cm_flash_device = {
327#endif 327#endif
328 328
329#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 329#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
330static struct resource bfin_uart_resources[] = { 330#ifdef CONFIG_SERIAL_BFIN_UART0
331static struct resource bfin_uart0_resources[] = {
331 { 332 {
332 .start = 0xFFC00400, 333 .start = UART0_THR,
333 .end = 0xFFC004FF, 334 .end = UART0_GCTL+2,
334 .flags = IORESOURCE_MEM, 335 .flags = IORESOURCE_MEM,
335 }, { 336 },
336 .start = 0xFFC02000, 337 {
337 .end = 0xFFC020FF, 338 .start = IRQ_UART0_RX,
339 .end = IRQ_UART0_RX+1,
340 .flags = IORESOURCE_IRQ,
341 },
342 {
343 .start = IRQ_UART0_ERROR,
344 .end = IRQ_UART0_ERROR,
345 .flags = IORESOURCE_IRQ,
346 },
347 {
348 .start = CH_UART0_TX,
349 .end = CH_UART0_TX,
350 .flags = IORESOURCE_DMA,
351 },
352 {
353 .start = CH_UART0_RX,
354 .end = CH_UART0_RX,
355 .flags = IORESOURCE_DMA,
356 },
357};
358
359unsigned short bfin_uart0_peripherals[] = {
360 P_UART0_TX, P_UART0_RX, 0
361};
362
363static struct platform_device bfin_uart0_device = {
364 .name = "bfin-uart",
365 .id = 0,
366 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
367 .resource = bfin_uart0_resources,
368 .dev = {
369 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
370 },
371};
372#endif
373#ifdef CONFIG_SERIAL_BFIN_UART1
374static struct resource bfin_uart1_resources[] = {
375 {
376 .start = UART1_THR,
377 .end = UART1_GCTL+2,
338 .flags = IORESOURCE_MEM, 378 .flags = IORESOURCE_MEM,
339 }, 379 },
380 {
381 .start = IRQ_UART1_RX,
382 .end = IRQ_UART1_RX+1,
383 .flags = IORESOURCE_IRQ,
384 },
385 {
386 .start = IRQ_UART1_ERROR,
387 .end = IRQ_UART1_ERROR,
388 .flags = IORESOURCE_IRQ,
389 },
390 {
391 .start = CH_UART1_TX,
392 .end = CH_UART1_TX,
393 .flags = IORESOURCE_DMA,
394 },
395 {
396 .start = CH_UART1_RX,
397 .end = CH_UART1_RX,
398 .flags = IORESOURCE_DMA,
399 },
400};
401
402unsigned short bfin_uart1_peripherals[] = {
403 P_UART1_TX, P_UART1_RX, 0
340}; 404};
341 405
342static struct platform_device bfin_uart_device = { 406static struct platform_device bfin_uart1_device = {
343 .name = "bfin-uart", 407 .name = "bfin-uart",
344 .id = 1, 408 .id = 1,
345 .num_resources = ARRAY_SIZE(bfin_uart_resources), 409 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
346 .resource = bfin_uart_resources, 410 .resource = bfin_uart1_resources,
411 .dev = {
412 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
413 },
347}; 414};
348#endif 415#endif
416#endif
349 417
350#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 418#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
351#ifdef CONFIG_BFIN_SIR0 419#ifdef CONFIG_BFIN_SIR0
@@ -423,16 +491,75 @@ static struct platform_device i2c_bfin_twi_device = {
423#endif 491#endif
424 492
425#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 493#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
494#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
495static struct resource bfin_sport0_uart_resources[] = {
496 {
497 .start = SPORT0_TCR1,
498 .end = SPORT0_MRCS3+4,
499 .flags = IORESOURCE_MEM,
500 },
501 {
502 .start = IRQ_SPORT0_RX,
503 .end = IRQ_SPORT0_RX+1,
504 .flags = IORESOURCE_IRQ,
505 },
506 {
507 .start = IRQ_SPORT0_ERROR,
508 .end = IRQ_SPORT0_ERROR,
509 .flags = IORESOURCE_IRQ,
510 },
511};
512
513unsigned short bfin_sport0_peripherals[] = {
514 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
515 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
516};
517
426static struct platform_device bfin_sport0_uart_device = { 518static struct platform_device bfin_sport0_uart_device = {
427 .name = "bfin-sport-uart", 519 .name = "bfin-sport-uart",
428 .id = 0, 520 .id = 0,
521 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
522 .resource = bfin_sport0_uart_resources,
523 .dev = {
524 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
525 },
526};
527#endif
528#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
529static struct resource bfin_sport1_uart_resources[] = {
530 {
531 .start = SPORT1_TCR1,
532 .end = SPORT1_MRCS3+4,
533 .flags = IORESOURCE_MEM,
534 },
535 {
536 .start = IRQ_SPORT1_RX,
537 .end = IRQ_SPORT1_RX+1,
538 .flags = IORESOURCE_IRQ,
539 },
540 {
541 .start = IRQ_SPORT1_ERROR,
542 .end = IRQ_SPORT1_ERROR,
543 .flags = IORESOURCE_IRQ,
544 },
545};
546
547unsigned short bfin_sport1_peripherals[] = {
548 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
549 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
429}; 550};
430 551
431static struct platform_device bfin_sport1_uart_device = { 552static struct platform_device bfin_sport1_uart_device = {
432 .name = "bfin-sport-uart", 553 .name = "bfin-sport-uart",
433 .id = 1, 554 .id = 1,
555 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
556 .resource = bfin_sport1_uart_resources,
557 .dev = {
558 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
559 },
434}; 560};
435#endif 561#endif
562#endif
436 563
437#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 564#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
438static struct platform_device bfin_mii_bus = { 565static struct platform_device bfin_mii_bus = {
@@ -522,7 +649,12 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
522#endif 649#endif
523 650
524#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 651#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
525 &bfin_uart_device, 652#ifdef CONFIG_SERIAL_BFIN_UART0
653 &bfin_uart0_device,
654#endif
655#ifdef CONFIG_SERIAL_BFIN_UART1
656 &bfin_uart1_device,
657#endif
526#endif 658#endif
527 659
528#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 660#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -539,9 +671,13 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
539#endif 671#endif
540 672
541#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 673#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
674#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
542 &bfin_sport0_uart_device, 675 &bfin_sport0_uart_device,
676#endif
677#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
543 &bfin_sport1_uart_device, 678 &bfin_sport1_uart_device,
544#endif 679#endif
680#endif
545 681
546#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 682#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
547 &isp1362_hcd_device, 683 &isp1362_hcd_device,
@@ -589,6 +725,33 @@ static int __init cm_bf537u_init(void)
589 725
590arch_initcall(cm_bf537u_init); 726arch_initcall(cm_bf537u_init);
591 727
728static struct platform_device *cm_bf537u_early_devices[] __initdata = {
729#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
730#ifdef CONFIG_SERIAL_BFIN_UART0
731 &bfin_uart0_device,
732#endif
733#ifdef CONFIG_SERIAL_BFIN_UART1
734 &bfin_uart1_device,
735#endif
736#endif
737
738#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
739#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
740 &bfin_sport0_uart_device,
741#endif
742#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
743 &bfin_sport1_uart_device,
744#endif
745#endif
746};
747
748void __init native_machine_early_platform_add_devices(void)
749{
750 printk(KERN_INFO "register early platform devices\n");
751 early_platform_add_devices(cm_bf537u_early_devices,
752 ARRAY_SIZE(cm_bf537u_early_devices));
753}
754
592void bfin_get_ether_addr(char *addr) 755void bfin_get_ether_addr(char *addr)
593{ 756{
594 random_ether_addr(addr); 757 random_ether_addr(addr);
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 0da927252701..c489d602c590 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -211,25 +211,93 @@ static struct platform_device bfin_spi0_device = {
211#endif /* spi master and devices */ 211#endif /* spi master and devices */
212 212
213#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 213#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
214static struct resource bfin_uart_resources[] = { 214#ifdef CONFIG_SERIAL_BFIN_UART0
215static struct resource bfin_uart0_resources[] = {
215 { 216 {
216 .start = 0xFFC00400, 217 .start = UART0_THR,
217 .end = 0xFFC004FF, 218 .end = UART0_GCTL+2,
218 .flags = IORESOURCE_MEM, 219 .flags = IORESOURCE_MEM,
219 }, { 220 },
220 .start = 0xFFC02000, 221 {
221 .end = 0xFFC020FF, 222 .start = IRQ_UART0_RX,
223 .end = IRQ_UART0_RX+1,
224 .flags = IORESOURCE_IRQ,
225 },
226 {
227 .start = IRQ_UART0_ERROR,
228 .end = IRQ_UART0_ERROR,
229 .flags = IORESOURCE_IRQ,
230 },
231 {
232 .start = CH_UART0_TX,
233 .end = CH_UART0_TX,
234 .flags = IORESOURCE_DMA,
235 },
236 {
237 .start = CH_UART0_RX,
238 .end = CH_UART0_RX,
239 .flags = IORESOURCE_DMA,
240 },
241};
242
243unsigned short bfin_uart0_peripherals[] = {
244 P_UART0_TX, P_UART0_RX, 0
245};
246
247static struct platform_device bfin_uart0_device = {
248 .name = "bfin-uart",
249 .id = 0,
250 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
251 .resource = bfin_uart0_resources,
252 .dev = {
253 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
254 },
255};
256#endif
257#ifdef CONFIG_SERIAL_BFIN_UART1
258static struct resource bfin_uart1_resources[] = {
259 {
260 .start = UART1_THR,
261 .end = UART1_GCTL+2,
222 .flags = IORESOURCE_MEM, 262 .flags = IORESOURCE_MEM,
223 }, 263 },
264 {
265 .start = IRQ_UART1_RX,
266 .end = IRQ_UART1_RX+1,
267 .flags = IORESOURCE_IRQ,
268 },
269 {
270 .start = IRQ_UART1_ERROR,
271 .end = IRQ_UART1_ERROR,
272 .flags = IORESOURCE_IRQ,
273 },
274 {
275 .start = CH_UART1_TX,
276 .end = CH_UART1_TX,
277 .flags = IORESOURCE_DMA,
278 },
279 {
280 .start = CH_UART1_RX,
281 .end = CH_UART1_RX,
282 .flags = IORESOURCE_DMA,
283 },
284};
285
286unsigned short bfin_uart1_peripherals[] = {
287 P_UART1_TX, P_UART1_RX, 0
224}; 288};
225 289
226static struct platform_device bfin_uart_device = { 290static struct platform_device bfin_uart1_device = {
227 .name = "bfin-uart", 291 .name = "bfin-uart",
228 .id = 1, 292 .id = 1,
229 .num_resources = ARRAY_SIZE(bfin_uart_resources), 293 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
230 .resource = bfin_uart_resources, 294 .resource = bfin_uart1_resources,
295 .dev = {
296 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
297 },
231}; 298};
232#endif 299#endif
300#endif
233 301
234#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 302#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
235#ifdef CONFIG_BFIN_SIR0 303#ifdef CONFIG_BFIN_SIR0
@@ -309,16 +377,75 @@ static struct platform_device i2c_bfin_twi_device = {
309#endif 377#endif
310 378
311#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 379#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
380#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
381static struct resource bfin_sport0_uart_resources[] = {
382 {
383 .start = SPORT0_TCR1,
384 .end = SPORT0_MRCS3+4,
385 .flags = IORESOURCE_MEM,
386 },
387 {
388 .start = IRQ_SPORT0_RX,
389 .end = IRQ_SPORT0_RX+1,
390 .flags = IORESOURCE_IRQ,
391 },
392 {
393 .start = IRQ_SPORT0_ERROR,
394 .end = IRQ_SPORT0_ERROR,
395 .flags = IORESOURCE_IRQ,
396 },
397};
398
399unsigned short bfin_sport0_peripherals[] = {
400 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
401 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
402};
403
312static struct platform_device bfin_sport0_uart_device = { 404static struct platform_device bfin_sport0_uart_device = {
313 .name = "bfin-sport-uart", 405 .name = "bfin-sport-uart",
314 .id = 0, 406 .id = 0,
407 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
408 .resource = bfin_sport0_uart_resources,
409 .dev = {
410 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
411 },
412};
413#endif
414#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
415static struct resource bfin_sport1_uart_resources[] = {
416 {
417 .start = SPORT1_TCR1,
418 .end = SPORT1_MRCS3+4,
419 .flags = IORESOURCE_MEM,
420 },
421 {
422 .start = IRQ_SPORT1_RX,
423 .end = IRQ_SPORT1_RX+1,
424 .flags = IORESOURCE_IRQ,
425 },
426 {
427 .start = IRQ_SPORT1_ERROR,
428 .end = IRQ_SPORT1_ERROR,
429 .flags = IORESOURCE_IRQ,
430 },
431};
432
433unsigned short bfin_sport1_peripherals[] = {
434 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
435 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
315}; 436};
316 437
317static struct platform_device bfin_sport1_uart_device = { 438static struct platform_device bfin_sport1_uart_device = {
318 .name = "bfin-sport-uart", 439 .name = "bfin-sport-uart",
319 .id = 1, 440 .id = 1,
441 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
442 .resource = bfin_sport1_uart_resources,
443 .dev = {
444 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
445 },
320}; 446};
321#endif 447#endif
448#endif
322 449
323static struct platform_device *minotaur_devices[] __initdata = { 450static struct platform_device *minotaur_devices[] __initdata = {
324#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) 451#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
@@ -343,7 +470,12 @@ static struct platform_device *minotaur_devices[] __initdata = {
343#endif 470#endif
344 471
345#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 472#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
346 &bfin_uart_device, 473#ifdef CONFIG_SERIAL_BFIN_UART0
474 &bfin_uart0_device,
475#endif
476#ifdef CONFIG_SERIAL_BFIN_UART1
477 &bfin_uart1_device,
478#endif
347#endif 479#endif
348 480
349#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 481#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -360,9 +492,13 @@ static struct platform_device *minotaur_devices[] __initdata = {
360#endif 492#endif
361 493
362#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 494#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
495#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
363 &bfin_sport0_uart_device, 496 &bfin_sport0_uart_device,
497#endif
498#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
364 &bfin_sport1_uart_device, 499 &bfin_sport1_uart_device,
365#endif 500#endif
501#endif
366 502
367}; 503};
368 504
@@ -380,6 +516,33 @@ static int __init minotaur_init(void)
380 516
381arch_initcall(minotaur_init); 517arch_initcall(minotaur_init);
382 518
519static struct platform_device *minotaur_early_devices[] __initdata = {
520#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
521#ifdef CONFIG_SERIAL_BFIN_UART0
522 &bfin_uart0_device,
523#endif
524#ifdef CONFIG_SERIAL_BFIN_UART1
525 &bfin_uart1_device,
526#endif
527#endif
528
529#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
530#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
531 &bfin_sport0_uart_device,
532#endif
533#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
534 &bfin_sport1_uart_device,
535#endif
536#endif
537};
538
539void __init native_machine_early_platform_add_devices(void)
540{
541 printk(KERN_INFO "register early platform devices\n");
542 early_platform_add_devices(minotaur_early_devices,
543 ARRAY_SIZE(minotaur_early_devices));
544}
545
383void native_machine_restart(char *cmd) 546void native_machine_restart(char *cmd)
384{ 547{
385 /* workaround reboot hang when booting from SPI */ 548 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 4e0afda472ab..812e8f991601 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -17,7 +17,6 @@
17#include <asm/dma.h> 17#include <asm/dma.h>
18#include <asm/bfin5xx_spi.h> 18#include <asm/bfin5xx_spi.h>
19#include <asm/portmux.h> 19#include <asm/portmux.h>
20#include <linux/usb/sl811.h>
21 20
22#include <linux/spi/ad7877.h> 21#include <linux/spi/ad7877.h>
23 22
@@ -99,51 +98,6 @@ static struct platform_device smc91x_device = {
99}; 98};
100#endif 99#endif
101 100
102#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
103static struct resource sl811_hcd_resources[] = {
104 {
105 .start = 0x20340000,
106 .end = 0x20340000,
107 .flags = IORESOURCE_MEM,
108 }, {
109 .start = 0x20340004,
110 .end = 0x20340004,
111 .flags = IORESOURCE_MEM,
112 }, {
113 .start = CONFIG_USB_SL811_BFIN_IRQ,
114 .end = CONFIG_USB_SL811_BFIN_IRQ,
115 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
116 },
117};
118
119#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
120void sl811_port_power(struct device *dev, int is_on)
121{
122 gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
123 gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
124
125}
126#endif
127
128static struct sl811_platform_data sl811_priv = {
129 .potpg = 10,
130 .power = 250, /* == 500mA */
131#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
132 .port_power = &sl811_port_power,
133#endif
134};
135
136static struct platform_device sl811_hcd_device = {
137 .name = "sl811-hcd",
138 .id = 0,
139 .dev = {
140 .platform_data = &sl811_priv,
141 },
142 .num_resources = ARRAY_SIZE(sl811_hcd_resources),
143 .resource = sl811_hcd_resources,
144};
145#endif
146
147#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 101#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
148static struct platform_device bfin_mii_bus = { 102static struct platform_device bfin_mii_bus = {
149 .name = "bfin_mii_bus", 103 .name = "bfin_mii_bus",
@@ -221,8 +175,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
221}; 175};
222#endif 176#endif
223 177
224#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 178#if defined(CONFIG_SND_BLACKFIN_AD183X) \
225 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 179 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
226static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 180static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
227 .enable_dma = 0, 181 .enable_dma = 0,
228 .bits_per_word = 16, 182 .bits_per_word = 16,
@@ -284,8 +238,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
284 }, 238 },
285#endif 239#endif
286 240
287#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 241#if defined(CONFIG_SND_BLACKFIN_AD183X) \
288 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 242 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
289 { 243 {
290 .modalias = "ad1836", 244 .modalias = "ad1836",
291 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 245 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -362,25 +316,93 @@ static struct platform_device bfin_fb_device = {
362#endif 316#endif
363 317
364#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 318#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
365static struct resource bfin_uart_resources[] = { 319#ifdef CONFIG_SERIAL_BFIN_UART0
320static struct resource bfin_uart0_resources[] = {
366 { 321 {
367 .start = 0xFFC00400, 322 .start = UART0_THR,
368 .end = 0xFFC004FF, 323 .end = UART0_GCTL+2,
369 .flags = IORESOURCE_MEM, 324 .flags = IORESOURCE_MEM,
370 }, { 325 },
371 .start = 0xFFC02000, 326 {
372 .end = 0xFFC020FF, 327 .start = IRQ_UART0_RX,
328 .end = IRQ_UART0_RX+1,
329 .flags = IORESOURCE_IRQ,
330 },
331 {
332 .start = IRQ_UART0_ERROR,
333 .end = IRQ_UART0_ERROR,
334 .flags = IORESOURCE_IRQ,
335 },
336 {
337 .start = CH_UART0_TX,
338 .end = CH_UART0_TX,
339 .flags = IORESOURCE_DMA,
340 },
341 {
342 .start = CH_UART0_RX,
343 .end = CH_UART0_RX,
344 .flags = IORESOURCE_DMA,
345 },
346};
347
348unsigned short bfin_uart0_peripherals[] = {
349 P_UART0_TX, P_UART0_RX, 0
350};
351
352static struct platform_device bfin_uart0_device = {
353 .name = "bfin-uart",
354 .id = 0,
355 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
356 .resource = bfin_uart0_resources,
357 .dev = {
358 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
359 },
360};
361#endif
362#ifdef CONFIG_SERIAL_BFIN_UART1
363static struct resource bfin_uart1_resources[] = {
364 {
365 .start = UART1_THR,
366 .end = UART1_GCTL+2,
373 .flags = IORESOURCE_MEM, 367 .flags = IORESOURCE_MEM,
374 }, 368 },
369 {
370 .start = IRQ_UART1_RX,
371 .end = IRQ_UART1_RX+1,
372 .flags = IORESOURCE_IRQ,
373 },
374 {
375 .start = IRQ_UART1_ERROR,
376 .end = IRQ_UART1_ERROR,
377 .flags = IORESOURCE_IRQ,
378 },
379 {
380 .start = CH_UART1_TX,
381 .end = CH_UART1_TX,
382 .flags = IORESOURCE_DMA,
383 },
384 {
385 .start = CH_UART1_RX,
386 .end = CH_UART1_RX,
387 .flags = IORESOURCE_DMA,
388 },
389};
390
391unsigned short bfin_uart1_peripherals[] = {
392 P_UART1_TX, P_UART1_RX, 0
375}; 393};
376 394
377static struct platform_device bfin_uart_device = { 395static struct platform_device bfin_uart1_device = {
378 .name = "bfin-uart", 396 .name = "bfin-uart",
379 .id = 1, 397 .id = 1,
380 .num_resources = ARRAY_SIZE(bfin_uart_resources), 398 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
381 .resource = bfin_uart_resources, 399 .resource = bfin_uart1_resources,
400 .dev = {
401 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
402 },
382}; 403};
383#endif 404#endif
405#endif
384 406
385#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 407#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
386#ifdef CONFIG_BFIN_SIR0 408#ifdef CONFIG_BFIN_SIR0
@@ -446,10 +468,6 @@ static struct platform_device *stamp_devices[] __initdata = {
446 &rtc_device, 468 &rtc_device,
447#endif 469#endif
448 470
449#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
450 &sl811_hcd_device,
451#endif
452
453#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 471#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
454 &smc91x_device, 472 &smc91x_device,
455#endif 473#endif
@@ -472,7 +490,12 @@ static struct platform_device *stamp_devices[] __initdata = {
472#endif 490#endif
473 491
474#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 492#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
475 &bfin_uart_device, 493#ifdef CONFIG_SERIAL_BFIN_UART0
494 &bfin_uart0_device,
495#endif
496#ifdef CONFIG_SERIAL_BFIN_UART1
497 &bfin_uart1_device,
498#endif
476#endif 499#endif
477 500
478#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 501#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -498,6 +521,24 @@ static int __init pnav_init(void)
498 521
499arch_initcall(pnav_init); 522arch_initcall(pnav_init);
500 523
524static struct platform_device *stamp_early_devices[] __initdata = {
525#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
526#ifdef CONFIG_SERIAL_BFIN_UART0
527 &bfin_uart0_device,
528#endif
529#ifdef CONFIG_SERIAL_BFIN_UART1
530 &bfin_uart1_device,
531#endif
532#endif
533};
534
535void __init native_machine_early_platform_add_devices(void)
536{
537 printk(KERN_INFO "register early platform devices\n");
538 early_platform_add_devices(stamp_early_devices,
539 ARRAY_SIZE(stamp_early_devices));
540}
541
501void bfin_get_ether_addr(char *addr) 542void bfin_get_ether_addr(char *addr)
502{ 543{
503 random_ether_addr(addr); 544 random_ether_addr(addr);
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index ac9b52e0087c..9eaf5b05c11e 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -20,10 +20,12 @@
20#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 20#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
21#include <linux/usb/isp1362.h> 21#include <linux/usb/isp1362.h>
22#endif 22#endif
23#include <linux/i2c.h>
24#include <linux/i2c/adp5588.h>
25#include <linux/etherdevice.h>
23#include <linux/ata_platform.h> 26#include <linux/ata_platform.h>
24#include <linux/irq.h> 27#include <linux/irq.h>
25#include <linux/interrupt.h> 28#include <linux/interrupt.h>
26#include <linux/i2c.h>
27#include <linux/usb/sl811.h> 29#include <linux/usb/sl811.h>
28#include <linux/spi/mmc_spi.h> 30#include <linux/spi/mmc_spi.h>
29#include <linux/leds.h> 31#include <linux/leds.h>
@@ -33,6 +35,14 @@
33#include <asm/reboot.h> 35#include <asm/reboot.h>
34#include <asm/portmux.h> 36#include <asm/portmux.h>
35#include <asm/dpmc.h> 37#include <asm/dpmc.h>
38#ifdef CONFIG_REGULATOR_ADP_SWITCH
39#include <linux/regulator/adp_switch.h>
40#endif
41#ifdef CONFIG_REGULATOR_AD5398
42#include <linux/regulator/ad5398.h>
43#endif
44#include <linux/regulator/consumer.h>
45#include <linux/regulator/userspace-consumer.h>
36 46
37/* 47/*
38 * Name the Board for the /proc/cpuinfo 48 * Name the Board for the /proc/cpuinfo
@@ -208,8 +218,8 @@ static struct resource sl811_hcd_resources[] = {
208 .end = 0x20340004, 218 .end = 0x20340004,
209 .flags = IORESOURCE_MEM, 219 .flags = IORESOURCE_MEM,
210 }, { 220 }, {
211 .start = CONFIG_USB_SL811_BFIN_IRQ, 221 .start = IRQ_PF4,
212 .end = CONFIG_USB_SL811_BFIN_IRQ, 222 .end = IRQ_PF4,
213 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, 223 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
214 }, 224 },
215}; 225};
@@ -454,6 +464,9 @@ static struct physmap_flash_data stamp_flash_data = {
454 .width = 2, 464 .width = 2,
455 .parts = stamp_partitions, 465 .parts = stamp_partitions,
456 .nr_parts = ARRAY_SIZE(stamp_partitions), 466 .nr_parts = ARRAY_SIZE(stamp_partitions),
467#ifdef CONFIG_ROMKERNEL
468 .probe_type = "map_rom",
469#endif
457}; 470};
458 471
459static struct resource stamp_flash_resource = { 472static struct resource stamp_flash_resource = {
@@ -515,20 +528,19 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
515}; 528};
516#endif 529#endif
517 530
518#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 531#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
519 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) 532 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
520static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 533static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
521 .enable_dma = 0, 534 .enable_dma = 0,
522 .bits_per_word = 16, 535 .bits_per_word = 16,
523}; 536};
524#endif 537#endif
525 538
526#if defined(CONFIG_SND_BF5XX_SOC_AD1938) \ 539#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
527 || defined(CONFIG_SND_BF5XX_SOC_AD1938_MODULE) 540 || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
528static struct bfin5xx_spi_chip ad1938_spi_chip_info = { 541static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
529 .enable_dma = 0, 542 .enable_dma = 0,
530 .bits_per_word = 8, 543 .bits_per_word = 8,
531 .cs_gpio = GPIO_PF5,
532}; 544};
533#endif 545#endif
534 546
@@ -644,6 +656,42 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = {
644}; 656};
645#endif 657#endif
646 658
659#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
660static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
661 .enable_dma = 0,
662 .bits_per_word = 16,
663};
664#endif
665
666#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
667unsigned short ad2s120x_platform_data[] = {
668 /* used as SAMPLE and RDVEL */
669 GPIO_PF5, GPIO_PF6, 0
670};
671
672static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
673 .enable_dma = 0,
674 .bits_per_word = 16,
675};
676#endif
677
678#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
679unsigned short ad2s1210_platform_data[] = {
680 /* use as SAMPLE, A0, A1 */
681 GPIO_PF7, GPIO_PF8, GPIO_PF9,
682# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
683 /* the RES0 and RES1 pins */
684 GPIO_PF4, GPIO_PF5,
685# endif
686 0,
687};
688
689static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
690 .enable_dma = 0,
691 .bits_per_word = 8,
692};
693#endif
694
647#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 695#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
648#define MMC_SPI_CARD_DETECT_INT IRQ_PF5 696#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
649 697
@@ -686,11 +734,11 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
686 .y_plate_ohms = 486, 734 .y_plate_ohms = 486,
687 .pressure_max = 1000, 735 .pressure_max = 1000,
688 .pressure_min = 0, 736 .pressure_min = 0,
689 .stopacq_polarity = 1, 737 .stopacq_polarity = 1,
690 .first_conversion_delay = 3, 738 .first_conversion_delay = 3,
691 .acquisition_time = 1, 739 .acquisition_time = 1,
692 .averaging = 1, 740 .averaging = 1,
693 .pen_down_acc_interval = 1, 741 .pen_down_acc_interval = 1,
694}; 742};
695#endif 743#endif
696 744
@@ -701,13 +749,13 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
701 .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ 749 .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
702 .pressure_max = 10000, 750 .pressure_max = 10000,
703 .pressure_min = 0, 751 .pressure_min = 0,
704 .first_conversion_delay = 3, /* wait 512us before do a first conversion */ 752 .first_conversion_delay = 3, /* wait 512us before do a first conversion */
705 .acquisition_time = 1, /* 4us acquisition time per sample */ 753 .acquisition_time = 1, /* 4us acquisition time per sample */
706 .median = 2, /* do 8 measurements */ 754 .median = 2, /* do 8 measurements */
707 .averaging = 1, /* take the average of 4 middle samples */ 755 .averaging = 1, /* take the average of 4 middle samples */
708 .pen_down_acc_interval = 255, /* 9.4 ms */ 756 .pen_down_acc_interval = 255, /* 9.4 ms */
709 .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ 757 .gpio_export = 1, /* Export GPIO to gpiolib */
710 .gpio_default = 1, /* During initialization set GPIO = HIGH */ 758 .gpio_base = -1, /* Dynamic allocation */
711}; 759};
712#endif 760#endif
713 761
@@ -742,6 +790,11 @@ static const struct adxl34x_platform_data adxl34x_info = {
742/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */ 790/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
743 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, 791 .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
744 .fifo_mode = ADXL_FIFO_STREAM, 792 .fifo_mode = ADXL_FIFO_STREAM,
793 .orientation_enable = ADXL_EN_ORIENTATION_3D,
794 .deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
795 .divisor_length = ADXL_LP_FILTER_DIVISOR_16,
796 /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
797 .ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
745}; 798};
746#endif 799#endif
747 800
@@ -813,6 +866,35 @@ static struct adf702x_platform_data adf7021_platform_data = {
813 .adf702x_regs = adf7021_regs, 866 .adf702x_regs = adf7021_regs,
814 .tx_reg = TXREG, 867 .tx_reg = TXREG,
815}; 868};
869static inline void adf702x_mac_init(void)
870{
871 random_ether_addr(adf7021_platform_data.mac_addr);
872}
873#else
874static inline void adf702x_mac_init(void) {}
875#endif
876
877#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
878#include <linux/spi/ads7846.h>
879static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
880 .bits_per_word = 8,
881};
882
883static int ads7873_get_pendown_state(void)
884{
885 return gpio_get_value(GPIO_PF6);
886}
887
888static struct ads7846_platform_data __initdata ad7873_pdata = {
889 .model = 7873, /* AD7873 */
890 .x_max = 0xfff,
891 .y_max = 0xfff,
892 .x_plate_ohms = 620,
893 .debounce_max = 1,
894 .debounce_rep = 0,
895 .debounce_tol = (~0),
896 .get_pendown_state = ads7873_get_pendown_state,
897};
816#endif 898#endif
817 899
818#if defined(CONFIG_MTD_DATAFLASH) \ 900#if defined(CONFIG_MTD_DATAFLASH) \
@@ -893,24 +975,25 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
893 }, 975 },
894#endif 976#endif
895 977
896#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ 978#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
897 || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) 979 || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
898 { 980 {
899 .modalias = "ad1836", 981 .modalias = "ad183x",
900 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 982 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
901 .bus_num = 0, 983 .bus_num = 0,
902 .chip_select = 4,/* CONFIG_SND_BLACKFIN_SPI_PFBIT */ 984 .chip_select = 4,/* CONFIG_SND_BLACKFIN_SPI_PFBIT */
985 .platform_data = "ad1836", /* only includes chip name for the moment */
903 .controller_data = &ad1836_spi_chip_info, 986 .controller_data = &ad1836_spi_chip_info,
904 .mode = SPI_MODE_3, 987 .mode = SPI_MODE_3,
905 }, 988 },
906#endif 989#endif
907 990
908#if defined(CONFIG_SND_BF5XX_SOC_AD1938) || defined(CONFIG_SND_BF5XX_SOC_AD1938_MODULE) 991#if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
909 { 992 {
910 .modalias = "ad1938", 993 .modalias = "ad193x",
911 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 994 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
912 .bus_num = 0, 995 .bus_num = 0,
913 .chip_select = 0,/* CONFIG_SND_BLACKFIN_SPI_PFBIT */ 996 .chip_select = 5,
914 .controller_data = &ad1938_spi_chip_info, 997 .controller_data = &ad1938_spi_chip_info,
915 .mode = SPI_MODE_3, 998 .mode = SPI_MODE_3,
916 }, 999 },
@@ -929,6 +1012,37 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
929 }, 1012 },
930#endif 1013#endif
931 1014
1015#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
1016 {
1017 .modalias = "ad2s90",
1018 .bus_num = 0,
1019 .chip_select = 3, /* change it for your board */
1020 .platform_data = NULL,
1021 .controller_data = &ad2s90_spi_chip_info,
1022 },
1023#endif
1024
1025#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
1026 {
1027 .modalias = "ad2s120x",
1028 .bus_num = 0,
1029 .chip_select = 4, /* CS, change it for your board */
1030 .platform_data = ad2s120x_platform_data,
1031 .controller_data = &ad2s120x_spi_chip_info,
1032 },
1033#endif
1034
1035#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
1036 {
1037 .modalias = "ad2s1210",
1038 .max_speed_hz = 8192000,
1039 .bus_num = 0,
1040 .chip_select = 4, /* CS, change it for your board */
1041 .platform_data = ad2s1210_platform_data,
1042 .controller_data = &ad2s1210_spi_chip_info,
1043 },
1044#endif
1045
932#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 1046#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
933 { 1047 {
934 .modalias = "mmc_spi", 1048 .modalias = "mmc_spi",
@@ -1016,7 +1130,18 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
1016 .mode = SPI_MODE_0, 1130 .mode = SPI_MODE_0,
1017 }, 1131 },
1018#endif 1132#endif
1019 1133#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
1134 {
1135 .modalias = "ads7846",
1136 .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
1137 .bus_num = 0,
1138 .irq = IRQ_PF6,
1139 .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
1140 .controller_data = &ad7873_spi_chip_info,
1141 .platform_data = &ad7873_pdata,
1142 .mode = SPI_MODE_0,
1143 },
1144#endif
1020}; 1145};
1021 1146
1022#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 1147#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
@@ -1132,9 +1257,10 @@ static struct platform_device bfin_fb_device = {
1132#include <asm/bfin-lq035q1.h> 1257#include <asm/bfin-lq035q1.h>
1133 1258
1134static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 1259static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
1135 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, 1260 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
1136 .use_bl = 0, /* let something else control the LCD Blacklight */ 1261 .ppi_mode = USE_RGB565_16_BIT_PPI,
1137 .gpio_bl = GPIO_PF7, 1262 .use_bl = 0, /* let something else control the LCD Blacklight */
1263 .gpio_bl = GPIO_PF7,
1138}; 1264};
1139 1265
1140static struct resource bfin_lq035q1_resources[] = { 1266static struct resource bfin_lq035q1_resources[] = {
@@ -1148,8 +1274,8 @@ static struct resource bfin_lq035q1_resources[] = {
1148static struct platform_device bfin_lq035q1_device = { 1274static struct platform_device bfin_lq035q1_device = {
1149 .name = "bfin-lq035q1", 1275 .name = "bfin-lq035q1",
1150 .id = -1, 1276 .id = -1,
1151 .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), 1277 .num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
1152 .resource = bfin_lq035q1_resources, 1278 .resource = bfin_lq035q1_resources,
1153 .dev = { 1279 .dev = {
1154 .platform_data = &bfin_lq035q1_data, 1280 .platform_data = &bfin_lq035q1_data,
1155 }, 1281 },
@@ -1157,30 +1283,105 @@ static struct platform_device bfin_lq035q1_device = {
1157#endif 1283#endif
1158 1284
1159#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1285#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
1160static struct resource bfin_uart_resources[] = {
1161#ifdef CONFIG_SERIAL_BFIN_UART0 1286#ifdef CONFIG_SERIAL_BFIN_UART0
1287static struct resource bfin_uart0_resources[] = {
1162 { 1288 {
1163 .start = 0xFFC00400, 1289 .start = UART0_THR,
1164 .end = 0xFFC004FF, 1290 .end = UART0_GCTL+2,
1165 .flags = IORESOURCE_MEM, 1291 .flags = IORESOURCE_MEM,
1166 }, 1292 },
1293 {
1294 .start = IRQ_UART0_RX,
1295 .end = IRQ_UART0_RX+1,
1296 .flags = IORESOURCE_IRQ,
1297 },
1298 {
1299 .start = IRQ_UART0_ERROR,
1300 .end = IRQ_UART0_ERROR,
1301 .flags = IORESOURCE_IRQ,
1302 },
1303 {
1304 .start = CH_UART0_TX,
1305 .end = CH_UART0_TX,
1306 .flags = IORESOURCE_DMA,
1307 },
1308 {
1309 .start = CH_UART0_RX,
1310 .end = CH_UART0_RX,
1311 .flags = IORESOURCE_DMA,
1312 },
1313#ifdef CONFIG_BFIN_UART0_CTSRTS
1314 { /* CTS pin */
1315 .start = GPIO_PG7,
1316 .end = GPIO_PG7,
1317 .flags = IORESOURCE_IO,
1318 },
1319 { /* RTS pin */
1320 .start = GPIO_PG6,
1321 .end = GPIO_PG6,
1322 .flags = IORESOURCE_IO,
1323 },
1324#endif
1325};
1326
1327unsigned short bfin_uart0_peripherals[] = {
1328 P_UART0_TX, P_UART0_RX, 0
1329};
1330
1331static struct platform_device bfin_uart0_device = {
1332 .name = "bfin-uart",
1333 .id = 0,
1334 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
1335 .resource = bfin_uart0_resources,
1336 .dev = {
1337 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
1338 },
1339};
1167#endif 1340#endif
1168#ifdef CONFIG_SERIAL_BFIN_UART1 1341#ifdef CONFIG_SERIAL_BFIN_UART1
1342static struct resource bfin_uart1_resources[] = {
1169 { 1343 {
1170 .start = 0xFFC02000, 1344 .start = UART1_THR,
1171 .end = 0xFFC020FF, 1345 .end = UART1_GCTL+2,
1172 .flags = IORESOURCE_MEM, 1346 .flags = IORESOURCE_MEM,
1173 }, 1347 },
1174#endif 1348 {
1349 .start = IRQ_UART1_RX,
1350 .end = IRQ_UART1_RX+1,
1351 .flags = IORESOURCE_IRQ,
1352 },
1353 {
1354 .start = IRQ_UART1_ERROR,
1355 .end = IRQ_UART1_ERROR,
1356 .flags = IORESOURCE_IRQ,
1357 },
1358 {
1359 .start = CH_UART1_TX,
1360 .end = CH_UART1_TX,
1361 .flags = IORESOURCE_DMA,
1362 },
1363 {
1364 .start = CH_UART1_RX,
1365 .end = CH_UART1_RX,
1366 .flags = IORESOURCE_DMA,
1367 },
1368};
1369
1370unsigned short bfin_uart1_peripherals[] = {
1371 P_UART1_TX, P_UART1_RX, 0
1175}; 1372};
1176 1373
1177static struct platform_device bfin_uart_device = { 1374static struct platform_device bfin_uart1_device = {
1178 .name = "bfin-uart", 1375 .name = "bfin-uart",
1179 .id = 1, 1376 .id = 1,
1180 .num_resources = ARRAY_SIZE(bfin_uart_resources), 1377 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
1181 .resource = bfin_uart_resources, 1378 .resource = bfin_uart1_resources,
1379 .dev = {
1380 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
1381 },
1182}; 1382};
1183#endif 1383#endif
1384#endif
1184 1385
1185#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1386#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
1186#ifdef CONFIG_BFIN_SIR0 1387#ifdef CONFIG_BFIN_SIR0
@@ -1260,7 +1461,6 @@ static struct platform_device i2c_bfin_twi_device = {
1260#endif 1461#endif
1261 1462
1262#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) 1463#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
1263#include <linux/i2c/adp5588.h>
1264static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = { 1464static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
1265 [0] = KEY_GRAVE, 1465 [0] = KEY_GRAVE,
1266 [1] = KEY_1, 1466 [1] = KEY_1,
@@ -1457,7 +1657,6 @@ static struct adp5520_platform_data adp5520_pdev_data = {
1457#endif 1657#endif
1458 1658
1459#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) 1659#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
1460#include <linux/i2c/adp5588.h>
1461static struct adp5588_gpio_platform_data adp5588_gpio_data = { 1660static struct adp5588_gpio_platform_data adp5588_gpio_data = {
1462 .gpio_start = 50, 1661 .gpio_start = 50,
1463 .pullup_dis_mask = 0, 1662 .pullup_dis_mask = 0,
@@ -1516,6 +1715,101 @@ static struct adp8870_backlight_platform_data adp8870_pdata = {
1516}; 1715};
1517#endif 1716#endif
1518 1717
1718#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
1719#include <linux/i2c/adp8860.h>
1720static struct led_info adp8860_leds[] = {
1721 {
1722 .name = "adp8860-led7",
1723 .default_trigger = "none",
1724 .flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms,
1725 },
1726};
1727
1728static struct adp8860_backlight_platform_data adp8860_pdata = {
1729 .bl_led_assign = ADP8860_BL_D1 | ADP8860_BL_D2 | ADP8860_BL_D3 |
1730 ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6, /* 1 = Backlight 0 = Individual LED */
1731
1732 .bl_fade_in = ADP8860_FADE_T_1200ms, /* Backlight Fade-In Timer */
1733 .bl_fade_out = ADP8860_FADE_T_1200ms, /* Backlight Fade-Out Timer */
1734 .bl_fade_law = ADP8860_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
1735
1736 .en_ambl_sens = 1, /* 1 = enable ambient light sensor */
1737 .abml_filt = ADP8860_BL_AMBL_FILT_320ms, /* Light sensor filter time */
1738
1739 .l1_daylight_max = ADP8860_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
1740 .l1_daylight_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
1741 .l2_office_max = ADP8860_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
1742 .l2_office_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
1743 .l3_dark_max = ADP8860_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
1744 .l3_dark_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
1745
1746 .l2_trip = ADP8860_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
1747 .l2_hyst = ADP8860_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
1748 .l3_trip = ADP8860_L3_COMP_CURR_uA(43), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
1749 .l3_hyst = ADP8860_L3_COMP_CURR_uA(11), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
1750
1751 .leds = adp8860_leds,
1752 .num_leds = ARRAY_SIZE(adp8860_leds),
1753 .led_fade_law = ADP8860_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
1754 .led_fade_in = ADP8860_FADE_T_600ms,
1755 .led_fade_out = ADP8860_FADE_T_600ms,
1756 .led_on_time = ADP8860_LED_ONT_200ms,
1757};
1758#endif
1759
1760#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
1761static struct regulator_consumer_supply ad5398_consumer = {
1762 .supply = "current",
1763};
1764
1765static struct regulator_init_data ad5398_regulator_data = {
1766 .constraints = {
1767 .name = "current range",
1768 .max_uA = 120000,
1769 .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
1770 },
1771 .num_consumer_supplies = 1,
1772 .consumer_supplies = &ad5398_consumer,
1773};
1774
1775static struct ad5398_platform_data ad5398_i2c_platform_data = {
1776 .current_bits = 10,
1777 .current_offset = 4,
1778 .regulator_data = &ad5398_regulator_data,
1779};
1780
1781#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
1782 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
1783static struct platform_device ad5398_virt_consumer_device = {
1784 .name = "reg-virt-consumer",
1785 .id = 0,
1786 .dev = {
1787 .platform_data = "current", /* Passed to driver */
1788 },
1789};
1790#endif
1791#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
1792 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
1793static struct regulator_bulk_data ad5398_bulk_data = {
1794 .supply = "current",
1795};
1796
1797static struct regulator_userspace_consumer_data ad5398_userspace_comsumer_data = {
1798 .name = "ad5398",
1799 .num_supplies = 1,
1800 .supplies = &ad5398_bulk_data,
1801};
1802
1803static struct platform_device ad5398_userspace_consumer_device = {
1804 .name = "reg-userspace-consumer",
1805 .id = 0,
1806 .dev = {
1807 .platform_data = &ad5398_userspace_comsumer_data,
1808 },
1809};
1810#endif
1811#endif
1812
1519static struct i2c_board_info __initdata bfin_i2c_board_info[] = { 1813static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
1520#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE) 1814#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
1521 { 1815 {
@@ -1524,6 +1818,52 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
1524 .platform_data = (void *)&ad7142_i2c_platform_data, 1818 .platform_data = (void *)&ad7142_i2c_platform_data,
1525 }, 1819 },
1526#endif 1820#endif
1821
1822#if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE)
1823 {
1824 I2C_BOARD_INFO("ad7150", 0x48),
1825 .irq = IRQ_PG5, /* fixme: use real interrupt number */
1826 },
1827#endif
1828
1829#if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE)
1830 {
1831 I2C_BOARD_INFO("ad7152", 0x48),
1832 },
1833#endif
1834
1835#if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE)
1836 {
1837 I2C_BOARD_INFO("ad774x", 0x48),
1838 },
1839#endif
1840
1841#if defined(CONFIG_AD7414) || defined(CONFIG_AD7414_MODULE)
1842 {
1843 I2C_BOARD_INFO("ad7414", 0x9),
1844 .irq = IRQ_PG5,
1845 /*
1846 * platform_data pointer is borrwoed by the driver to
1847 * store custimer defined IRQ ALART level mode.
1848 * only IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW are valid.
1849 */
1850 .platform_data = (void *)IRQF_TRIGGER_LOW,
1851 },
1852#endif
1853
1854#if defined(CONFIG_AD7416) || defined(CONFIG_AD7416_MODULE)
1855 {
1856 I2C_BOARD_INFO("ad7417", 0xb),
1857 .irq = IRQ_PG5,
1858 /*
1859 * platform_data pointer is borrwoed by the driver to
1860 * store custimer defined IRQ ALART level mode.
1861 * only IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW are valid.
1862 */
1863 .platform_data = (void *)IRQF_TRIGGER_LOW,
1864 },
1865#endif
1866
1527#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) 1867#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
1528 { 1868 {
1529 I2C_BOARD_INFO("pcf8574_lcd", 0x22), 1869 I2C_BOARD_INFO("pcf8574_lcd", 0x22),
@@ -1595,24 +1935,105 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
1595 I2C_BOARD_INFO("adau1761", 0x38), 1935 I2C_BOARD_INFO("adau1761", 0x38),
1596 }, 1936 },
1597#endif 1937#endif
1938#if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE)
1939 {
1940 I2C_BOARD_INFO("adau1361", 0x38),
1941 },
1942#endif
1598#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE) 1943#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE)
1599 { 1944 {
1600 I2C_BOARD_INFO("ad5258", 0x18), 1945 I2C_BOARD_INFO("ad5258", 0x18),
1601 }, 1946 },
1602#endif 1947#endif
1948#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
1949 {
1950 I2C_BOARD_INFO("ssm2602", 0x1b),
1951 },
1952#endif
1953#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
1954 {
1955 I2C_BOARD_INFO("ad5398", 0xC),
1956 .platform_data = (void *)&ad5398_i2c_platform_data,
1957 },
1958#endif
1959#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
1960 {
1961 I2C_BOARD_INFO("adp8860", 0x2A),
1962 .platform_data = (void *)&adp8860_pdata,
1963 },
1964#endif
1603}; 1965};
1604 1966
1605#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 1967#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
1968#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1969static struct resource bfin_sport0_uart_resources[] = {
1970 {
1971 .start = SPORT0_TCR1,
1972 .end = SPORT0_MRCS3+4,
1973 .flags = IORESOURCE_MEM,
1974 },
1975 {
1976 .start = IRQ_SPORT0_RX,
1977 .end = IRQ_SPORT0_RX+1,
1978 .flags = IORESOURCE_IRQ,
1979 },
1980 {
1981 .start = IRQ_SPORT0_ERROR,
1982 .end = IRQ_SPORT0_ERROR,
1983 .flags = IORESOURCE_IRQ,
1984 },
1985};
1986
1987unsigned short bfin_sport0_peripherals[] = {
1988 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
1989 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
1990};
1991
1606static struct platform_device bfin_sport0_uart_device = { 1992static struct platform_device bfin_sport0_uart_device = {
1607 .name = "bfin-sport-uart", 1993 .name = "bfin-sport-uart",
1608 .id = 0, 1994 .id = 0,
1995 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
1996 .resource = bfin_sport0_uart_resources,
1997 .dev = {
1998 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
1999 },
2000};
2001#endif
2002#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
2003static struct resource bfin_sport1_uart_resources[] = {
2004 {
2005 .start = SPORT1_TCR1,
2006 .end = SPORT1_MRCS3+4,
2007 .flags = IORESOURCE_MEM,
2008 },
2009 {
2010 .start = IRQ_SPORT1_RX,
2011 .end = IRQ_SPORT1_RX+1,
2012 .flags = IORESOURCE_IRQ,
2013 },
2014 {
2015 .start = IRQ_SPORT1_ERROR,
2016 .end = IRQ_SPORT1_ERROR,
2017 .flags = IORESOURCE_IRQ,
2018 },
2019};
2020
2021unsigned short bfin_sport1_peripherals[] = {
2022 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
2023 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
1609}; 2024};
1610 2025
1611static struct platform_device bfin_sport1_uart_device = { 2026static struct platform_device bfin_sport1_uart_device = {
1612 .name = "bfin-sport-uart", 2027 .name = "bfin-sport-uart",
1613 .id = 1, 2028 .id = 1,
2029 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
2030 .resource = bfin_sport1_uart_resources,
2031 .dev = {
2032 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
2033 },
1614}; 2034};
1615#endif 2035#endif
2036#endif
1616 2037
1617#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 2038#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
1618#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE 2039#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE
@@ -1701,13 +2122,121 @@ static struct platform_device bfin_dpmc = {
1701 }, 2122 },
1702}; 2123};
1703 2124
2125#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
2126static struct platform_device bfin_i2s = {
2127 .name = "bfin-i2s",
2128 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2129 /* TODO: add platform data here */
2130};
2131#endif
2132
1704#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2133#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1705static struct platform_device bfin_tdm = { 2134static struct platform_device bfin_tdm = {
1706 .name = "bfin-tdm", 2135 .name = "bfin-tdm",
2136 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1707 /* TODO: add platform data here */ 2137 /* TODO: add platform data here */
1708}; 2138};
1709#endif 2139#endif
1710 2140
2141#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2142static struct platform_device bfin_ac97 = {
2143 .name = "bfin-ac97",
2144 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2145 /* TODO: add platform data here */
2146};
2147#endif
2148
2149#if defined(CONFIG_REGULATOR_ADP_SWITCH) || defined(CONFIG_REGULATOR_ADP_SWITCH_MODULE)
2150#define REGULATOR_ADP122 "adp122"
2151#define REGULATOR_ADP150 "adp150"
2152
2153static struct regulator_consumer_supply adp122_consumers = {
2154 .supply = REGULATOR_ADP122,
2155};
2156
2157static struct regulator_consumer_supply adp150_consumers = {
2158 .supply = REGULATOR_ADP150,
2159};
2160
2161static struct regulator_init_data adp_switch_regulator_data[] = {
2162 {
2163 .constraints = {
2164 .name = REGULATOR_ADP122,
2165 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2166 .min_uA = 0,
2167 .max_uA = 300000,
2168 },
2169 .num_consumer_supplies = 1, /* only 1 */
2170 .consumer_supplies = &adp122_consumers,
2171 .driver_data = (void *)GPIO_PF2, /* gpio port only */
2172 },
2173 {
2174 .constraints = {
2175 .name = REGULATOR_ADP150,
2176 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2177 .min_uA = 0,
2178 .max_uA = 150000,
2179 },
2180 .num_consumer_supplies = 1, /* only 1 */
2181 .consumer_supplies = &adp150_consumers,
2182 .driver_data = (void *)GPIO_PF3, /* gpio port only */
2183 },
2184};
2185
2186static struct adp_switch_platform_data adp_switch_pdata = {
2187 .regulator_num = ARRAY_SIZE(adp_switch_regulator_data),
2188 .regulator_data = adp_switch_regulator_data,
2189};
2190
2191static struct platform_device adp_switch_device = {
2192 .name = "adp_switch",
2193 .id = 0,
2194 .dev = {
2195 .platform_data = &adp_switch_pdata,
2196 },
2197};
2198
2199#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
2200 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2201static struct regulator_bulk_data adp122_bulk_data = {
2202 .supply = REGULATOR_ADP122,
2203};
2204
2205static struct regulator_userspace_consumer_data adp122_userspace_comsumer_data = {
2206 .name = REGULATOR_ADP122,
2207 .num_supplies = 1,
2208 .supplies = &adp122_bulk_data,
2209};
2210
2211static struct platform_device adp122_userspace_consumer_device = {
2212 .name = "reg-userspace-consumer",
2213 .id = 0,
2214 .dev = {
2215 .platform_data = &adp122_userspace_comsumer_data,
2216 },
2217};
2218
2219static struct regulator_bulk_data adp150_bulk_data = {
2220 .supply = REGULATOR_ADP150,
2221};
2222
2223static struct regulator_userspace_consumer_data adp150_userspace_comsumer_data = {
2224 .name = REGULATOR_ADP150,
2225 .num_supplies = 1,
2226 .supplies = &adp150_bulk_data,
2227};
2228
2229static struct platform_device adp150_userspace_consumer_device = {
2230 .name = "reg-userspace-consumer",
2231 .id = 1,
2232 .dev = {
2233 .platform_data = &adp150_userspace_comsumer_data,
2234 },
2235};
2236#endif
2237#endif
2238
2239
1711static struct platform_device *stamp_devices[] __initdata = { 2240static struct platform_device *stamp_devices[] __initdata = {
1712 2241
1713 &bfin_dpmc, 2242 &bfin_dpmc,
@@ -1771,7 +2300,12 @@ static struct platform_device *stamp_devices[] __initdata = {
1771#endif 2300#endif
1772 2301
1773#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 2302#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
1774 &bfin_uart_device, 2303#ifdef CONFIG_SERIAL_BFIN_UART0
2304 &bfin_uart0_device,
2305#endif
2306#ifdef CONFIG_SERIAL_BFIN_UART1
2307 &bfin_uart1_device,
2308#endif
1775#endif 2309#endif
1776 2310
1777#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 2311#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -1788,9 +2322,13 @@ static struct platform_device *stamp_devices[] __initdata = {
1788#endif 2322#endif
1789 2323
1790#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 2324#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
2325#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1791 &bfin_sport0_uart_device, 2326 &bfin_sport0_uart_device,
2327#endif
2328#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1792 &bfin_sport1_uart_device, 2329 &bfin_sport1_uart_device,
1793#endif 2330#endif
2331#endif
1794 2332
1795#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) 2333#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
1796 &bfin_pata_device, 2334 &bfin_pata_device,
@@ -1808,18 +2346,46 @@ static struct platform_device *stamp_devices[] __initdata = {
1808 &stamp_flash_device, 2346 &stamp_flash_device,
1809#endif 2347#endif
1810 2348
2349#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
2350 &bfin_i2s,
2351#endif
2352
1811#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2353#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1812 &bfin_tdm, 2354 &bfin_tdm,
1813#endif 2355#endif
2356
2357#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2358 &bfin_ac97,
2359#endif
2360#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
2361#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
2362 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
2363 &ad5398_virt_consumer_device,
2364#endif
2365#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
2366 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2367 &ad5398_userspace_consumer_device,
2368#endif
2369#endif
2370
2371#if defined(CONFIG_REGULATOR_ADP_SWITCH) || defined(CONFIG_REGULATOR_ADP_SWITCH_MODULE)
2372 &adp_switch_device,
2373#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
2374 defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
2375 &adp122_userspace_consumer_device,
2376 &adp150_userspace_consumer_device,
2377#endif
2378#endif
1814}; 2379};
1815 2380
1816static int __init stamp_init(void) 2381static int __init stamp_init(void)
1817{ 2382{
1818 printk(KERN_INFO "%s(): registering device resources\n", __func__); 2383 printk(KERN_INFO "%s(): registering device resources\n", __func__);
1819 i2c_register_board_info(0, bfin_i2c_board_info,
1820 ARRAY_SIZE(bfin_i2c_board_info));
1821 bfin_plat_nand_init(); 2384 bfin_plat_nand_init();
2385 adf702x_mac_init();
1822 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); 2386 platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
2387 i2c_register_board_info(0, bfin_i2c_board_info,
2388 ARRAY_SIZE(bfin_i2c_board_info));
1823 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 2389 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
1824 2390
1825 return 0; 2391 return 0;
@@ -1827,6 +2393,34 @@ static int __init stamp_init(void)
1827 2393
1828arch_initcall(stamp_init); 2394arch_initcall(stamp_init);
1829 2395
2396
2397static struct platform_device *stamp_early_devices[] __initdata = {
2398#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
2399#ifdef CONFIG_SERIAL_BFIN_UART0
2400 &bfin_uart0_device,
2401#endif
2402#ifdef CONFIG_SERIAL_BFIN_UART1
2403 &bfin_uart1_device,
2404#endif
2405#endif
2406
2407#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
2408#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
2409 &bfin_sport0_uart_device,
2410#endif
2411#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
2412 &bfin_sport1_uart_device,
2413#endif
2414#endif
2415};
2416
2417void __init native_machine_early_platform_add_devices(void)
2418{
2419 printk(KERN_INFO "register early platform devices\n");
2420 early_platform_add_devices(stamp_early_devices,
2421 ARRAY_SIZE(stamp_early_devices));
2422}
2423
1830void native_machine_restart(char *cmd) 2424void native_machine_restart(char *cmd)
1831{ 2425{
1832 /* workaround reboot hang when booting from SPI */ 2426 /* workaround reboot hang when booting from SPI */
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 57163b65a4f5..4f0a2e72ce4c 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -74,7 +74,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
74}; 74};
75#endif 75#endif
76 76
77#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 77#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
78static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 78static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
79 .enable_dma = 0, 79 .enable_dma = 0,
80 .bits_per_word = 16, 80 .bits_per_word = 16,
@@ -113,7 +113,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
113 }, 113 },
114#endif 114#endif
115 115
116#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 116#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
117 { 117 {
118 .modalias = "ad1836", 118 .modalias = "ad1836",
119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 119 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -327,25 +327,93 @@ static struct platform_device cm_flash_device = {
327#endif 327#endif
328 328
329#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 329#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
330static struct resource bfin_uart_resources[] = { 330#ifdef CONFIG_SERIAL_BFIN_UART0
331static struct resource bfin_uart0_resources[] = {
331 { 332 {
332 .start = 0xFFC00400, 333 .start = UART0_THR,
333 .end = 0xFFC004FF, 334 .end = UART0_GCTL+2,
334 .flags = IORESOURCE_MEM, 335 .flags = IORESOURCE_MEM,
335 }, { 336 },
336 .start = 0xFFC02000, 337 {
337 .end = 0xFFC020FF, 338 .start = IRQ_UART0_RX,
339 .end = IRQ_UART0_RX+1,
340 .flags = IORESOURCE_IRQ,
341 },
342 {
343 .start = IRQ_UART0_ERROR,
344 .end = IRQ_UART0_ERROR,
345 .flags = IORESOURCE_IRQ,
346 },
347 {
348 .start = CH_UART0_TX,
349 .end = CH_UART0_TX,
350 .flags = IORESOURCE_DMA,
351 },
352 {
353 .start = CH_UART0_RX,
354 .end = CH_UART0_RX,
355 .flags = IORESOURCE_DMA,
356 },
357};
358
359unsigned short bfin_uart0_peripherals[] = {
360 P_UART0_TX, P_UART0_RX, 0
361};
362
363static struct platform_device bfin_uart0_device = {
364 .name = "bfin-uart",
365 .id = 0,
366 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
367 .resource = bfin_uart0_resources,
368 .dev = {
369 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
370 },
371};
372#endif
373#ifdef CONFIG_SERIAL_BFIN_UART1
374static struct resource bfin_uart1_resources[] = {
375 {
376 .start = UART1_THR,
377 .end = UART1_GCTL+2,
338 .flags = IORESOURCE_MEM, 378 .flags = IORESOURCE_MEM,
339 }, 379 },
380 {
381 .start = IRQ_UART1_RX,
382 .end = IRQ_UART1_RX+1,
383 .flags = IORESOURCE_IRQ,
384 },
385 {
386 .start = IRQ_UART1_ERROR,
387 .end = IRQ_UART1_ERROR,
388 .flags = IORESOURCE_IRQ,
389 },
390 {
391 .start = CH_UART1_TX,
392 .end = CH_UART1_TX,
393 .flags = IORESOURCE_DMA,
394 },
395 {
396 .start = CH_UART1_RX,
397 .end = CH_UART1_RX,
398 .flags = IORESOURCE_DMA,
399 },
400};
401
402unsigned short bfin_uart1_peripherals[] = {
403 P_UART1_TX, P_UART1_RX, 0
340}; 404};
341 405
342static struct platform_device bfin_uart_device = { 406static struct platform_device bfin_uart1_device = {
343 .name = "bfin-uart", 407 .name = "bfin-uart",
344 .id = 1, 408 .id = 1,
345 .num_resources = ARRAY_SIZE(bfin_uart_resources), 409 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
346 .resource = bfin_uart_resources, 410 .resource = bfin_uart1_resources,
411 .dev = {
412 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
413 },
347}; 414};
348#endif 415#endif
416#endif
349 417
350#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 418#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
351#ifdef CONFIG_BFIN_SIR0 419#ifdef CONFIG_BFIN_SIR0
@@ -425,16 +493,75 @@ static struct platform_device i2c_bfin_twi_device = {
425#endif 493#endif
426 494
427#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 495#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
496#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
497static struct resource bfin_sport0_uart_resources[] = {
498 {
499 .start = SPORT0_TCR1,
500 .end = SPORT0_MRCS3+4,
501 .flags = IORESOURCE_MEM,
502 },
503 {
504 .start = IRQ_SPORT0_RX,
505 .end = IRQ_SPORT0_RX+1,
506 .flags = IORESOURCE_IRQ,
507 },
508 {
509 .start = IRQ_SPORT0_ERROR,
510 .end = IRQ_SPORT0_ERROR,
511 .flags = IORESOURCE_IRQ,
512 },
513};
514
515unsigned short bfin_sport0_peripherals[] = {
516 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
517 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
518};
519
428static struct platform_device bfin_sport0_uart_device = { 520static struct platform_device bfin_sport0_uart_device = {
429 .name = "bfin-sport-uart", 521 .name = "bfin-sport-uart",
430 .id = 0, 522 .id = 0,
523 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
524 .resource = bfin_sport0_uart_resources,
525 .dev = {
526 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
527 },
528};
529#endif
530#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
531static struct resource bfin_sport1_uart_resources[] = {
532 {
533 .start = SPORT1_TCR1,
534 .end = SPORT1_MRCS3+4,
535 .flags = IORESOURCE_MEM,
536 },
537 {
538 .start = IRQ_SPORT1_RX,
539 .end = IRQ_SPORT1_RX+1,
540 .flags = IORESOURCE_IRQ,
541 },
542 {
543 .start = IRQ_SPORT1_ERROR,
544 .end = IRQ_SPORT1_ERROR,
545 .flags = IORESOURCE_IRQ,
546 },
547};
548
549unsigned short bfin_sport1_peripherals[] = {
550 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
551 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
431}; 552};
432 553
433static struct platform_device bfin_sport1_uart_device = { 554static struct platform_device bfin_sport1_uart_device = {
434 .name = "bfin-sport-uart", 555 .name = "bfin-sport-uart",
435 .id = 1, 556 .id = 1,
557 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
558 .resource = bfin_sport1_uart_resources,
559 .dev = {
560 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
561 },
436}; 562};
437#endif 563#endif
564#endif
438 565
439#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 566#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
440static struct platform_device bfin_mii_bus = { 567static struct platform_device bfin_mii_bus = {
@@ -524,7 +651,12 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
524#endif 651#endif
525 652
526#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 653#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
527 &bfin_uart_device, 654#ifdef CONFIG_SERIAL_BFIN_UART0
655 &bfin_uart0_device,
656#endif
657#ifdef CONFIG_SERIAL_BFIN_UART1
658 &bfin_uart1_device,
659#endif
528#endif 660#endif
529 661
530#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 662#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -541,9 +673,13 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
541#endif 673#endif
542 674
543#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) 675#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
676#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
544 &bfin_sport0_uart_device, 677 &bfin_sport0_uart_device,
678#endif
679#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
545 &bfin_sport1_uart_device, 680 &bfin_sport1_uart_device,
546#endif 681#endif
682#endif
547 683
548#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) 684#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
549 &isp1362_hcd_device, 685 &isp1362_hcd_device,
@@ -591,6 +727,33 @@ static int __init tcm_bf537_init(void)
591 727
592arch_initcall(tcm_bf537_init); 728arch_initcall(tcm_bf537_init);
593 729
730static struct platform_device *cm_bf537_early_devices[] __initdata = {
731#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
732#ifdef CONFIG_SERIAL_BFIN_UART0
733 &bfin_uart0_device,
734#endif
735#ifdef CONFIG_SERIAL_BFIN_UART1
736 &bfin_uart1_device,
737#endif
738#endif
739
740#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
741#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
742 &bfin_sport0_uart_device,
743#endif
744#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
745 &bfin_sport1_uart_device,
746#endif
747#endif
748};
749
750void __init native_machine_early_platform_add_devices(void)
751{
752 printk(KERN_INFO "register early platform devices\n");
753 early_platform_add_devices(cm_bf537_early_devices,
754 ARRAY_SIZE(cm_bf537_early_devices));
755}
756
594void bfin_get_ether_addr(char *addr) 757void bfin_get_ether_addr(char *addr)
595{ 758{
596 random_ether_addr(addr); 759 random_ether_addr(addr);
diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h
index 0defa9457e7f..789a4f226f7b 100644
--- a/arch/blackfin/mach-bf537/include/mach/irq.h
+++ b/arch/blackfin/mach-bf537/include/mach/irq.h
@@ -134,7 +134,17 @@
134 134
135#define GPIO_IRQ_BASE IRQ_PF0 135#define GPIO_IRQ_BASE IRQ_PF0
136 136
137#define NR_IRQS (IRQ_PH15+1) 137#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */
138#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */
139#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */
140#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */
141#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */
142#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */
143#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */
144#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */
145
146#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
147#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
138 148
139#define IVG7 7 149#define IVG7 7
140#define IVG8 8 150#define IVG8 8
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index c296bb1ed503..1a1f65855b03 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -41,37 +41,148 @@ static struct platform_device rtc_device = {
41#endif 41#endif
42 42
43#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 43#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
44static struct resource bfin_uart_resources[] = {
45#ifdef CONFIG_SERIAL_BFIN_UART0 44#ifdef CONFIG_SERIAL_BFIN_UART0
45static struct resource bfin_uart0_resources[] = {
46 { 46 {
47 .start = 0xFFC00400, 47 .start = UART0_THR,
48 .end = 0xFFC004FF, 48 .end = UART0_GCTL+2,
49 .flags = IORESOURCE_MEM, 49 .flags = IORESOURCE_MEM,
50 }, 50 },
51 {
52 .start = IRQ_UART0_RX,
53 .end = IRQ_UART0_RX+1,
54 .flags = IORESOURCE_IRQ,
55 },
56 {
57 .start = IRQ_UART0_ERROR,
58 .end = IRQ_UART0_ERROR,
59 .flags = IORESOURCE_IRQ,
60 },
61 {
62 .start = CH_UART0_TX,
63 .end = CH_UART0_TX,
64 .flags = IORESOURCE_DMA,
65 },
66 {
67 .start = CH_UART0_RX,
68 .end = CH_UART0_RX,
69 .flags = IORESOURCE_DMA,
70 },
71#ifdef CONFIG_BFIN_UART0_CTSRTS
72 { /* CTS pin */
73 .start = GPIO_PG7,
74 .end = GPIO_PG7,
75 .flags = IORESOURCE_IO,
76 },
77 { /* RTS pin */
78 .start = GPIO_PG6,
79 .end = GPIO_PG6,
80 .flags = IORESOURCE_IO,
81 },
82#endif
83};
84
85unsigned short bfin_uart0_peripherals[] = {
86 P_UART0_TX, P_UART0_RX, 0
87};
88
89static struct platform_device bfin_uart0_device = {
90 .name = "bfin-uart",
91 .id = 0,
92 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
93 .resource = bfin_uart0_resources,
94 .dev = {
95 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
96 },
97};
51#endif 98#endif
52#ifdef CONFIG_SERIAL_BFIN_UART1 99#ifdef CONFIG_SERIAL_BFIN_UART1
100static struct resource bfin_uart1_resources[] = {
53 { 101 {
54 .start = 0xFFC02000, 102 .start = UART1_THR,
55 .end = 0xFFC020FF, 103 .end = UART1_GCTL+2,
56 .flags = IORESOURCE_MEM, 104 .flags = IORESOURCE_MEM,
57 }, 105 },
106 {
107 .start = IRQ_UART1_RX,
108 .end = IRQ_UART1_RX+1,
109 .flags = IORESOURCE_IRQ,
110 },
111 {
112 .start = IRQ_UART1_ERROR,
113 .end = IRQ_UART1_ERROR,
114 .flags = IORESOURCE_IRQ,
115 },
116 {
117 .start = CH_UART1_TX,
118 .end = CH_UART1_TX,
119 .flags = IORESOURCE_DMA,
120 },
121 {
122 .start = CH_UART1_RX,
123 .end = CH_UART1_RX,
124 .flags = IORESOURCE_DMA,
125 },
126};
127
128unsigned short bfin_uart1_peripherals[] = {
129 P_UART1_TX, P_UART1_RX, 0
130};
131
132static struct platform_device bfin_uart1_device = {
133 .name = "bfin-uart",
134 .id = 1,
135 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
136 .resource = bfin_uart1_resources,
137 .dev = {
138 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
139 },
140};
58#endif 141#endif
59#ifdef CONFIG_SERIAL_BFIN_UART2 142#ifdef CONFIG_SERIAL_BFIN_UART2
143static struct resource bfin_uart2_resources[] = {
60 { 144 {
61 .start = 0xFFC02100, 145 .start = UART2_THR,
62 .end = 0xFFC021FF, 146 .end = UART2_GCTL+2,
63 .flags = IORESOURCE_MEM, 147 .flags = IORESOURCE_MEM,
64 }, 148 },
65#endif 149 {
150 .start = IRQ_UART2_RX,
151 .end = IRQ_UART2_RX+1,
152 .flags = IORESOURCE_IRQ,
153 },
154 {
155 .start = IRQ_UART2_ERROR,
156 .end = IRQ_UART2_ERROR,
157 .flags = IORESOURCE_IRQ,
158 },
159 {
160 .start = CH_UART2_TX,
161 .end = CH_UART2_TX,
162 .flags = IORESOURCE_DMA,
163 },
164 {
165 .start = CH_UART2_RX,
166 .end = CH_UART2_RX,
167 .flags = IORESOURCE_DMA,
168 },
169};
170
171unsigned short bfin_uart2_peripherals[] = {
172 P_UART2_TX, P_UART2_RX, 0
66}; 173};
67 174
68static struct platform_device bfin_uart_device = { 175static struct platform_device bfin_uart2_device = {
69 .name = "bfin-uart", 176 .name = "bfin-uart",
70 .id = 1, 177 .id = 2,
71 .num_resources = ARRAY_SIZE(bfin_uart_resources), 178 .num_resources = ARRAY_SIZE(bfin_uart2_resources),
72 .resource = bfin_uart_resources, 179 .resource = bfin_uart2_resources,
180 .dev = {
181 .platform_data = &bfin_uart2_peripherals, /* Passed to driver */
182 },
73}; 183};
74#endif 184#endif
185#endif
75 186
76#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 187#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
77#ifdef CONFIG_BFIN_SIR0 188#ifdef CONFIG_BFIN_SIR0
@@ -151,6 +262,145 @@ static struct platform_device bfin_sir2_device = {
151#endif 262#endif
152#endif 263#endif
153 264
265#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
266#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
267static struct resource bfin_sport0_uart_resources[] = {
268 {
269 .start = SPORT0_TCR1,
270 .end = SPORT0_MRCS3+4,
271 .flags = IORESOURCE_MEM,
272 },
273 {
274 .start = IRQ_SPORT0_RX,
275 .end = IRQ_SPORT0_RX+1,
276 .flags = IORESOURCE_IRQ,
277 },
278 {
279 .start = IRQ_SPORT0_ERROR,
280 .end = IRQ_SPORT0_ERROR,
281 .flags = IORESOURCE_IRQ,
282 },
283};
284
285unsigned short bfin_sport0_peripherals[] = {
286 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
287 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
288};
289
290static struct platform_device bfin_sport0_uart_device = {
291 .name = "bfin-sport-uart",
292 .id = 0,
293 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
294 .resource = bfin_sport0_uart_resources,
295 .dev = {
296 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
297 },
298};
299#endif
300#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
301static struct resource bfin_sport1_uart_resources[] = {
302 {
303 .start = SPORT1_TCR1,
304 .end = SPORT1_MRCS3+4,
305 .flags = IORESOURCE_MEM,
306 },
307 {
308 .start = IRQ_SPORT1_RX,
309 .end = IRQ_SPORT1_RX+1,
310 .flags = IORESOURCE_IRQ,
311 },
312 {
313 .start = IRQ_SPORT1_ERROR,
314 .end = IRQ_SPORT1_ERROR,
315 .flags = IORESOURCE_IRQ,
316 },
317};
318
319unsigned short bfin_sport1_peripherals[] = {
320 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
321 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
322};
323
324static struct platform_device bfin_sport1_uart_device = {
325 .name = "bfin-sport-uart",
326 .id = 1,
327 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
328 .resource = bfin_sport1_uart_resources,
329 .dev = {
330 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
331 },
332};
333#endif
334#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
335static struct resource bfin_sport2_uart_resources[] = {
336 {
337 .start = SPORT2_TCR1,
338 .end = SPORT2_MRCS3+4,
339 .flags = IORESOURCE_MEM,
340 },
341 {
342 .start = IRQ_SPORT2_RX,
343 .end = IRQ_SPORT2_RX+1,
344 .flags = IORESOURCE_IRQ,
345 },
346 {
347 .start = IRQ_SPORT2_ERROR,
348 .end = IRQ_SPORT2_ERROR,
349 .flags = IORESOURCE_IRQ,
350 },
351};
352
353unsigned short bfin_sport2_peripherals[] = {
354 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
355 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
356};
357
358static struct platform_device bfin_sport2_uart_device = {
359 .name = "bfin-sport-uart",
360 .id = 2,
361 .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
362 .resource = bfin_sport2_uart_resources,
363 .dev = {
364 .platform_data = &bfin_sport2_peripherals, /* Passed to driver */
365 },
366};
367#endif
368#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
369static struct resource bfin_sport3_uart_resources[] = {
370 {
371 .start = SPORT3_TCR1,
372 .end = SPORT3_MRCS3+4,
373 .flags = IORESOURCE_MEM,
374 },
375 {
376 .start = IRQ_SPORT3_RX,
377 .end = IRQ_SPORT3_RX+1,
378 .flags = IORESOURCE_IRQ,
379 },
380 {
381 .start = IRQ_SPORT3_ERROR,
382 .end = IRQ_SPORT3_ERROR,
383 .flags = IORESOURCE_IRQ,
384 },
385};
386
387unsigned short bfin_sport3_peripherals[] = {
388 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
389 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
390};
391
392static struct platform_device bfin_sport3_uart_device = {
393 .name = "bfin-sport-uart",
394 .id = 3,
395 .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
396 .resource = bfin_sport3_uart_resources,
397 .dev = {
398 .platform_data = &bfin_sport3_peripherals, /* Passed to driver */
399 },
400};
401#endif
402#endif
403
154#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 404#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
155unsigned short bfin_can_peripherals[] = { 405unsigned short bfin_can_peripherals[] = {
156 P_CAN0_RX, P_CAN0_TX, 0 406 P_CAN0_RX, P_CAN0_TX, 0
@@ -268,8 +518,8 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
268 .median = 2, /* do 8 measurements */ 518 .median = 2, /* do 8 measurements */
269 .averaging = 1, /* take the average of 4 middle samples */ 519 .averaging = 1, /* take the average of 4 middle samples */
270 .pen_down_acc_interval = 255, /* 9.4 ms */ 520 .pen_down_acc_interval = 255, /* 9.4 ms */
271 .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ 521 .gpio_export = 1, /* Export GPIO to gpiolib */
272 .gpio_default = 1, /* During initialization set GPIO = HIGH */ 522 .gpio_base = -1, /* Dynamic allocation */
273}; 523};
274#endif 524#endif
275 525
@@ -284,9 +534,10 @@ static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
284#include <asm/bfin-lq035q1.h> 534#include <asm/bfin-lq035q1.h>
285 535
286static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { 536static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
287 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, 537 .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
288 .use_bl = 0, /* let something else control the LCD Blacklight */ 538 .ppi_mode = USE_RGB565_16_BIT_PPI,
289 .gpio_bl = GPIO_PF7, 539 .use_bl = 0, /* let something else control the LCD Blacklight */
540 .gpio_bl = GPIO_PF7,
290}; 541};
291 542
292static struct resource bfin_lq035q1_resources[] = { 543static struct resource bfin_lq035q1_resources[] = {
@@ -622,7 +873,15 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
622#endif 873#endif
623 874
624#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 875#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
625 &bfin_uart_device, 876#ifdef CONFIG_SERIAL_BFIN_UART0
877 &bfin_uart0_device,
878#endif
879#ifdef CONFIG_SERIAL_BFIN_UART1
880 &bfin_uart1_device,
881#endif
882#ifdef CONFIG_SERIAL_BFIN_UART2
883 &bfin_uart2_device,
884#endif
626#endif 885#endif
627 886
628#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) 887#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
@@ -648,6 +907,21 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
648#endif 907#endif
649#endif 908#endif
650 909
910#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
911#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
912 &bfin_sport0_uart_device,
913#endif
914#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
915 &bfin_sport1_uart_device,
916#endif
917#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
918 &bfin_sport2_uart_device,
919#endif
920#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
921 &bfin_sport3_uart_device,
922#endif
923#endif
924
651#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 925#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
652 &bfin_can_device, 926 &bfin_can_device,
653#endif 927#endif
@@ -683,3 +957,39 @@ static int __init ezkit_init(void)
683} 957}
684 958
685arch_initcall(ezkit_init); 959arch_initcall(ezkit_init);
960
961static struct platform_device *ezkit_early_devices[] __initdata = {
962#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
963#ifdef CONFIG_SERIAL_BFIN_UART0
964 &bfin_uart0_device,
965#endif
966#ifdef CONFIG_SERIAL_BFIN_UART1
967 &bfin_uart1_device,
968#endif
969#ifdef CONFIG_SERIAL_BFIN_UART2
970 &bfin_uart2_device,
971#endif
972#endif
973
974#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
975#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
976 &bfin_sport0_uart_device,
977#endif
978#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
979 &bfin_sport1_uart_device,
980#endif
981#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
982 &bfin_sport2_uart_device,
983#endif
984#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
985 &bfin_sport3_uart_device,
986#endif
987#endif
988};
989
990void __init native_machine_early_platform_add_devices(void)
991{
992 printk(KERN_INFO "register early platform devices\n");
993 early_platform_add_devices(ezkit_early_devices,
994 ARRAY_SIZE(ezkit_early_devices));
995}
diff --git a/arch/blackfin/mach-bf538/include/mach/irq.h b/arch/blackfin/mach-bf538/include/mach/irq.h
index a4b7fcbc556b..7a479d224dc7 100644
--- a/arch/blackfin/mach-bf538/include/mach/irq.h
+++ b/arch/blackfin/mach-bf538/include/mach/irq.h
@@ -110,7 +110,8 @@
110 110
111#define GPIO_IRQ_BASE IRQ_PF0 111#define GPIO_IRQ_BASE IRQ_PF0
112 112
113#define NR_IRQS (IRQ_PF15+1) 113#define NR_MACH_IRQS (IRQ_PF15 + 1)
114#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
114 115
115#define IVG7 7 116#define IVG7 7
116#define IVG8 8 117#define IVG8 8
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index ccdcd6da2e9f..f60c333fec66 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -127,44 +127,211 @@ static struct platform_device rtc_device = {
127#endif 127#endif
128 128
129#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 129#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
130static struct resource bfin_uart_resources[] = {
131#ifdef CONFIG_SERIAL_BFIN_UART0 130#ifdef CONFIG_SERIAL_BFIN_UART0
131static struct resource bfin_uart0_resources[] = {
132 { 132 {
133 .start = 0xFFC00400, 133 .start = UART0_DLL,
134 .end = 0xFFC004FF, 134 .end = UART0_RBR+2,
135 .flags = IORESOURCE_MEM, 135 .flags = IORESOURCE_MEM,
136 }, 136 },
137 {
138 .start = IRQ_UART0_RX,
139 .end = IRQ_UART0_RX+1,
140 .flags = IORESOURCE_IRQ,
141 },
142 {
143 .start = IRQ_UART0_ERROR,
144 .end = IRQ_UART0_ERROR,
145 .flags = IORESOURCE_IRQ,
146 },
147 {
148 .start = CH_UART0_TX,
149 .end = CH_UART0_TX,
150 .flags = IORESOURCE_DMA,
151 },
152 {
153 .start = CH_UART0_RX,
154 .end = CH_UART0_RX,
155 .flags = IORESOURCE_DMA,
156 },
157};
158
159unsigned short bfin_uart0_peripherals[] = {
160 P_UART0_TX, P_UART0_RX, 0
161};
162
163static struct platform_device bfin_uart0_device = {
164 .name = "bfin-uart",
165 .id = 0,
166 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
167 .resource = bfin_uart0_resources,
168 .dev = {
169 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
170 },
171};
137#endif 172#endif
138#ifdef CONFIG_SERIAL_BFIN_UART1 173#ifdef CONFIG_SERIAL_BFIN_UART1
174static struct resource bfin_uart1_resources[] = {
139 { 175 {
140 .start = 0xFFC02000, 176 .start = UART1_DLL,
141 .end = 0xFFC020FF, 177 .end = UART1_RBR+2,
142 .flags = IORESOURCE_MEM, 178 .flags = IORESOURCE_MEM,
143 }, 179 },
180 {
181 .start = IRQ_UART1_RX,
182 .end = IRQ_UART1_RX+1,
183 .flags = IORESOURCE_IRQ,
184 },
185 {
186 .start = IRQ_UART1_ERROR,
187 .end = IRQ_UART1_ERROR,
188 .flags = IORESOURCE_IRQ,
189 },
190 {
191 .start = CH_UART1_TX,
192 .end = CH_UART1_TX,
193 .flags = IORESOURCE_DMA,
194 },
195 {
196 .start = CH_UART1_RX,
197 .end = CH_UART1_RX,
198 .flags = IORESOURCE_DMA,
199 },
200#ifdef CONFIG_BFIN_UART1_CTSRTS
201 { /* CTS pin -- 0 means not supported */
202 .start = GPIO_PE10,
203 .end = GPIO_PE10,
204 .flags = IORESOURCE_IO,
205 },
206 { /* RTS pin -- 0 means not supported */
207 .start = GPIO_PE9,
208 .end = GPIO_PE9,
209 .flags = IORESOURCE_IO,
210 },
211#endif
212};
213
214unsigned short bfin_uart1_peripherals[] = {
215 P_UART1_TX, P_UART1_RX,
216#ifdef CONFIG_BFIN_UART1_CTSRTS
217 P_UART1_RTS, P_UART1_CTS,
218#endif
219 0
220};
221
222static struct platform_device bfin_uart1_device = {
223 .name = "bfin-uart",
224 .id = 1,
225 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
226 .resource = bfin_uart1_resources,
227 .dev = {
228 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
229 },
230};
144#endif 231#endif
145#ifdef CONFIG_SERIAL_BFIN_UART2 232#ifdef CONFIG_SERIAL_BFIN_UART2
233static struct resource bfin_uart2_resources[] = {
146 { 234 {
147 .start = 0xFFC02100, 235 .start = UART2_DLL,
148 .end = 0xFFC021FF, 236 .end = UART2_RBR+2,
149 .flags = IORESOURCE_MEM, 237 .flags = IORESOURCE_MEM,
150 }, 238 },
239 {
240 .start = IRQ_UART2_RX,
241 .end = IRQ_UART2_RX+1,
242 .flags = IORESOURCE_IRQ,
243 },
244 {
245 .start = IRQ_UART2_ERROR,
246 .end = IRQ_UART2_ERROR,
247 .flags = IORESOURCE_IRQ,
248 },
249 {
250 .start = CH_UART2_TX,
251 .end = CH_UART2_TX,
252 .flags = IORESOURCE_DMA,
253 },
254 {
255 .start = CH_UART2_RX,
256 .end = CH_UART2_RX,
257 .flags = IORESOURCE_DMA,
258 },
259};
260
261unsigned short bfin_uart2_peripherals[] = {
262 P_UART2_TX, P_UART2_RX, 0
263};
264
265static struct platform_device bfin_uart2_device = {
266 .name = "bfin-uart",
267 .id = 2,
268 .num_resources = ARRAY_SIZE(bfin_uart2_resources),
269 .resource = bfin_uart2_resources,
270 .dev = {
271 .platform_data = &bfin_uart2_peripherals, /* Passed to driver */
272 },
273};
151#endif 274#endif
152#ifdef CONFIG_SERIAL_BFIN_UART3 275#ifdef CONFIG_SERIAL_BFIN_UART3
276static struct resource bfin_uart3_resources[] = {
153 { 277 {
154 .start = 0xFFC03100, 278 .start = UART3_DLL,
155 .end = 0xFFC031FF, 279 .end = UART3_RBR+2,
156 .flags = IORESOURCE_MEM, 280 .flags = IORESOURCE_MEM,
157 }, 281 },
282 {
283 .start = IRQ_UART3_RX,
284 .end = IRQ_UART3_RX+1,
285 .flags = IORESOURCE_IRQ,
286 },
287 {
288 .start = IRQ_UART3_ERROR,
289 .end = IRQ_UART3_ERROR,
290 .flags = IORESOURCE_IRQ,
291 },
292 {
293 .start = CH_UART3_TX,
294 .end = CH_UART3_TX,
295 .flags = IORESOURCE_DMA,
296 },
297 {
298 .start = CH_UART3_RX,
299 .end = CH_UART3_RX,
300 .flags = IORESOURCE_DMA,
301 },
302#ifdef CONFIG_BFIN_UART3_CTSRTS
303 { /* CTS pin -- 0 means not supported */
304 .start = GPIO_PB3,
305 .end = GPIO_PB3,
306 .flags = IORESOURCE_IO,
307 },
308 { /* RTS pin -- 0 means not supported */
309 .start = GPIO_PB2,
310 .end = GPIO_PB2,
311 .flags = IORESOURCE_IO,
312 },
158#endif 313#endif
159}; 314};
160 315
161static struct platform_device bfin_uart_device = { 316unsigned short bfin_uart3_peripherals[] = {
317 P_UART3_TX, P_UART3_RX,
318#ifdef CONFIG_BFIN_UART3_CTSRTS
319 P_UART3_RTS, P_UART3_CTS,
320#endif
321 0
322};
323
324static struct platform_device bfin_uart3_device = {
162 .name = "bfin-uart", 325 .name = "bfin-uart",
163 .id = 1, 326 .id = 3,
164 .num_resources = ARRAY_SIZE(bfin_uart_resources), 327 .num_resources = ARRAY_SIZE(bfin_uart3_resources),
165 .resource = bfin_uart_resources, 328 .resource = bfin_uart3_resources,
329 .dev = {
330 .platform_data = &bfin_uart3_peripherals, /* Passed to driver */
331 },
166}; 332};
167#endif 333#endif
334#endif
168 335
169#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 336#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
170#ifdef CONFIG_BFIN_SIR0 337#ifdef CONFIG_BFIN_SIR0
@@ -359,6 +526,145 @@ static struct platform_device musb_device = {
359}; 526};
360#endif 527#endif
361 528
529#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
530#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
531static struct resource bfin_sport0_uart_resources[] = {
532 {
533 .start = SPORT0_TCR1,
534 .end = SPORT0_MRCS3+4,
535 .flags = IORESOURCE_MEM,
536 },
537 {
538 .start = IRQ_SPORT0_RX,
539 .end = IRQ_SPORT0_RX+1,
540 .flags = IORESOURCE_IRQ,
541 },
542 {
543 .start = IRQ_SPORT0_ERROR,
544 .end = IRQ_SPORT0_ERROR,
545 .flags = IORESOURCE_IRQ,
546 },
547};
548
549unsigned short bfin_sport0_peripherals[] = {
550 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
551 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
552};
553
554static struct platform_device bfin_sport0_uart_device = {
555 .name = "bfin-sport-uart",
556 .id = 0,
557 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
558 .resource = bfin_sport0_uart_resources,
559 .dev = {
560 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
561 },
562};
563#endif
564#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
565static struct resource bfin_sport1_uart_resources[] = {
566 {
567 .start = SPORT1_TCR1,
568 .end = SPORT1_MRCS3+4,
569 .flags = IORESOURCE_MEM,
570 },
571 {
572 .start = IRQ_SPORT1_RX,
573 .end = IRQ_SPORT1_RX+1,
574 .flags = IORESOURCE_IRQ,
575 },
576 {
577 .start = IRQ_SPORT1_ERROR,
578 .end = IRQ_SPORT1_ERROR,
579 .flags = IORESOURCE_IRQ,
580 },
581};
582
583unsigned short bfin_sport1_peripherals[] = {
584 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
585 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
586};
587
588static struct platform_device bfin_sport1_uart_device = {
589 .name = "bfin-sport-uart",
590 .id = 1,
591 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
592 .resource = bfin_sport1_uart_resources,
593 .dev = {
594 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
595 },
596};
597#endif
598#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
599static struct resource bfin_sport2_uart_resources[] = {
600 {
601 .start = SPORT2_TCR1,
602 .end = SPORT2_MRCS3+4,
603 .flags = IORESOURCE_MEM,
604 },
605 {
606 .start = IRQ_SPORT2_RX,
607 .end = IRQ_SPORT2_RX+1,
608 .flags = IORESOURCE_IRQ,
609 },
610 {
611 .start = IRQ_SPORT2_ERROR,
612 .end = IRQ_SPORT2_ERROR,
613 .flags = IORESOURCE_IRQ,
614 },
615};
616
617unsigned short bfin_sport2_peripherals[] = {
618 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
619 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
620};
621
622static struct platform_device bfin_sport2_uart_device = {
623 .name = "bfin-sport-uart",
624 .id = 2,
625 .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
626 .resource = bfin_sport2_uart_resources,
627 .dev = {
628 .platform_data = &bfin_sport2_peripherals, /* Passed to driver */
629 },
630};
631#endif
632#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
633static struct resource bfin_sport3_uart_resources[] = {
634 {
635 .start = SPORT3_TCR1,
636 .end = SPORT3_MRCS3+4,
637 .flags = IORESOURCE_MEM,
638 },
639 {
640 .start = IRQ_SPORT3_RX,
641 .end = IRQ_SPORT3_RX+1,
642 .flags = IORESOURCE_IRQ,
643 },
644 {
645 .start = IRQ_SPORT3_ERROR,
646 .end = IRQ_SPORT3_ERROR,
647 .flags = IORESOURCE_IRQ,
648 },
649};
650
651unsigned short bfin_sport3_peripherals[] = {
652 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
653 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
654};
655
656static struct platform_device bfin_sport3_uart_device = {
657 .name = "bfin-sport-uart",
658 .id = 3,
659 .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
660 .resource = bfin_sport3_uart_resources,
661 .dev = {
662 .platform_data = &bfin_sport3_peripherals, /* Passed to driver */
663 },
664};
665#endif
666#endif
667
362#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 668#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
363static struct resource bfin_atapi_resources[] = { 669static struct resource bfin_atapi_resources[] = {
364 { 670 {
@@ -752,7 +1058,18 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
752#endif 1058#endif
753 1059
754#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1060#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
755 &bfin_uart_device, 1061#ifdef CONFIG_SERIAL_BFIN_UART0
1062 &bfin_uart0_device,
1063#endif
1064#ifdef CONFIG_SERIAL_BFIN_UART1
1065 &bfin_uart1_device,
1066#endif
1067#ifdef CONFIG_SERIAL_BFIN_UART2
1068 &bfin_uart2_device,
1069#endif
1070#ifdef CONFIG_SERIAL_BFIN_UART3
1071 &bfin_uart3_device,
1072#endif
756#endif 1073#endif
757 1074
758#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1075#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -782,6 +1099,21 @@ static struct platform_device *cm_bf548_devices[] __initdata = {
782 &musb_device, 1099 &musb_device,
783#endif 1100#endif
784 1101
1102#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
1103#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1104 &bfin_sport0_uart_device,
1105#endif
1106#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1107 &bfin_sport1_uart_device,
1108#endif
1109#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
1110 &bfin_sport2_uart_device,
1111#endif
1112#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
1113 &bfin_sport3_uart_device,
1114#endif
1115#endif
1116
785#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) 1117#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
786 &bfin_atapi_device, 1118 &bfin_atapi_device,
787#endif 1119#endif
@@ -833,3 +1165,42 @@ static int __init cm_bf548_init(void)
833} 1165}
834 1166
835arch_initcall(cm_bf548_init); 1167arch_initcall(cm_bf548_init);
1168
1169static struct platform_device *cm_bf548_early_devices[] __initdata = {
1170#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
1171#ifdef CONFIG_SERIAL_BFIN_UART0
1172 &bfin_uart0_device,
1173#endif
1174#ifdef CONFIG_SERIAL_BFIN_UART1
1175 &bfin_uart1_device,
1176#endif
1177#ifdef CONFIG_SERIAL_BFIN_UART2
1178 &bfin_uart2_device,
1179#endif
1180#ifdef CONFIG_SERIAL_BFIN_UART3
1181 &bfin_uart3_device,
1182#endif
1183#endif
1184
1185#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
1186#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1187 &bfin_sport0_uart_device,
1188#endif
1189#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1190 &bfin_sport1_uart_device,
1191#endif
1192#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
1193 &bfin_sport2_uart_device,
1194#endif
1195#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
1196 &bfin_sport3_uart_device,
1197#endif
1198#endif
1199};
1200
1201void __init native_machine_early_platform_add_devices(void)
1202{
1203 printk(KERN_INFO "register early platform devices\n");
1204 early_platform_add_devices(cm_bf548_early_devices,
1205 ARRAY_SIZE(cm_bf548_early_devices));
1206}
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 60193f72777c..06919db00a74 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -232,44 +232,211 @@ static struct platform_device rtc_device = {
232#endif 232#endif
233 233
234#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 234#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
235static struct resource bfin_uart_resources[] = {
236#ifdef CONFIG_SERIAL_BFIN_UART0 235#ifdef CONFIG_SERIAL_BFIN_UART0
236static struct resource bfin_uart0_resources[] = {
237 { 237 {
238 .start = 0xFFC00400, 238 .start = UART0_DLL,
239 .end = 0xFFC004FF, 239 .end = UART0_RBR+2,
240 .flags = IORESOURCE_MEM, 240 .flags = IORESOURCE_MEM,
241 }, 241 },
242 {
243 .start = IRQ_UART0_RX,
244 .end = IRQ_UART0_RX+1,
245 .flags = IORESOURCE_IRQ,
246 },
247 {
248 .start = IRQ_UART0_ERROR,
249 .end = IRQ_UART0_ERROR,
250 .flags = IORESOURCE_IRQ,
251 },
252 {
253 .start = CH_UART0_TX,
254 .end = CH_UART0_TX,
255 .flags = IORESOURCE_DMA,
256 },
257 {
258 .start = CH_UART0_RX,
259 .end = CH_UART0_RX,
260 .flags = IORESOURCE_DMA,
261 },
262};
263
264unsigned short bfin_uart0_peripherals[] = {
265 P_UART0_TX, P_UART0_RX, 0
266};
267
268static struct platform_device bfin_uart0_device = {
269 .name = "bfin-uart",
270 .id = 0,
271 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
272 .resource = bfin_uart0_resources,
273 .dev = {
274 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
275 },
276};
242#endif 277#endif
243#ifdef CONFIG_SERIAL_BFIN_UART1 278#ifdef CONFIG_SERIAL_BFIN_UART1
279static struct resource bfin_uart1_resources[] = {
244 { 280 {
245 .start = 0xFFC02000, 281 .start = UART1_DLL,
246 .end = 0xFFC020FF, 282 .end = UART1_RBR+2,
247 .flags = IORESOURCE_MEM, 283 .flags = IORESOURCE_MEM,
248 }, 284 },
285 {
286 .start = IRQ_UART1_RX,
287 .end = IRQ_UART1_RX+1,
288 .flags = IORESOURCE_IRQ,
289 },
290 {
291 .start = IRQ_UART1_ERROR,
292 .end = IRQ_UART1_ERROR,
293 .flags = IORESOURCE_IRQ,
294 },
295 {
296 .start = CH_UART1_TX,
297 .end = CH_UART1_TX,
298 .flags = IORESOURCE_DMA,
299 },
300 {
301 .start = CH_UART1_RX,
302 .end = CH_UART1_RX,
303 .flags = IORESOURCE_DMA,
304 },
305#ifdef CONFIG_BFIN_UART1_CTSRTS
306 { /* CTS pin -- 0 means not supported */
307 .start = GPIO_PE10,
308 .end = GPIO_PE10,
309 .flags = IORESOURCE_IO,
310 },
311 { /* RTS pin -- 0 means not supported */
312 .start = GPIO_PE9,
313 .end = GPIO_PE9,
314 .flags = IORESOURCE_IO,
315 },
316#endif
317};
318
319unsigned short bfin_uart1_peripherals[] = {
320 P_UART1_TX, P_UART1_RX,
321#ifdef CONFIG_BFIN_UART1_CTSRTS
322 P_UART1_RTS, P_UART1_CTS,
323#endif
324 0
325};
326
327static struct platform_device bfin_uart1_device = {
328 .name = "bfin-uart",
329 .id = 1,
330 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
331 .resource = bfin_uart1_resources,
332 .dev = {
333 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
334 },
335};
249#endif 336#endif
250#ifdef CONFIG_SERIAL_BFIN_UART2 337#ifdef CONFIG_SERIAL_BFIN_UART2
338static struct resource bfin_uart2_resources[] = {
251 { 339 {
252 .start = 0xFFC02100, 340 .start = UART2_DLL,
253 .end = 0xFFC021FF, 341 .end = UART2_RBR+2,
254 .flags = IORESOURCE_MEM, 342 .flags = IORESOURCE_MEM,
255 }, 343 },
344 {
345 .start = IRQ_UART2_RX,
346 .end = IRQ_UART2_RX+1,
347 .flags = IORESOURCE_IRQ,
348 },
349 {
350 .start = IRQ_UART2_ERROR,
351 .end = IRQ_UART2_ERROR,
352 .flags = IORESOURCE_IRQ,
353 },
354 {
355 .start = CH_UART2_TX,
356 .end = CH_UART2_TX,
357 .flags = IORESOURCE_DMA,
358 },
359 {
360 .start = CH_UART2_RX,
361 .end = CH_UART2_RX,
362 .flags = IORESOURCE_DMA,
363 },
364};
365
366unsigned short bfin_uart2_peripherals[] = {
367 P_UART2_TX, P_UART2_RX, 0
368};
369
370static struct platform_device bfin_uart2_device = {
371 .name = "bfin-uart",
372 .id = 2,
373 .num_resources = ARRAY_SIZE(bfin_uart2_resources),
374 .resource = bfin_uart2_resources,
375 .dev = {
376 .platform_data = &bfin_uart2_peripherals, /* Passed to driver */
377 },
378};
256#endif 379#endif
257#ifdef CONFIG_SERIAL_BFIN_UART3 380#ifdef CONFIG_SERIAL_BFIN_UART3
381static struct resource bfin_uart3_resources[] = {
258 { 382 {
259 .start = 0xFFC03100, 383 .start = UART3_DLL,
260 .end = 0xFFC031FF, 384 .end = UART3_RBR+2,
261 .flags = IORESOURCE_MEM, 385 .flags = IORESOURCE_MEM,
262 }, 386 },
387 {
388 .start = IRQ_UART3_RX,
389 .end = IRQ_UART3_RX+1,
390 .flags = IORESOURCE_IRQ,
391 },
392 {
393 .start = IRQ_UART3_ERROR,
394 .end = IRQ_UART3_ERROR,
395 .flags = IORESOURCE_IRQ,
396 },
397 {
398 .start = CH_UART3_TX,
399 .end = CH_UART3_TX,
400 .flags = IORESOURCE_DMA,
401 },
402 {
403 .start = CH_UART3_RX,
404 .end = CH_UART3_RX,
405 .flags = IORESOURCE_DMA,
406 },
407#ifdef CONFIG_BFIN_UART3_CTSRTS
408 { /* CTS pin -- 0 means not supported */
409 .start = GPIO_PB3,
410 .end = GPIO_PB3,
411 .flags = IORESOURCE_IO,
412 },
413 { /* RTS pin -- 0 means not supported */
414 .start = GPIO_PB2,
415 .end = GPIO_PB2,
416 .flags = IORESOURCE_IO,
417 },
263#endif 418#endif
264}; 419};
265 420
266static struct platform_device bfin_uart_device = { 421unsigned short bfin_uart3_peripherals[] = {
422 P_UART3_TX, P_UART3_RX,
423#ifdef CONFIG_BFIN_UART3_CTSRTS
424 P_UART3_RTS, P_UART3_CTS,
425#endif
426 0
427};
428
429static struct platform_device bfin_uart3_device = {
267 .name = "bfin-uart", 430 .name = "bfin-uart",
268 .id = 1, 431 .id = 3,
269 .num_resources = ARRAY_SIZE(bfin_uart_resources), 432 .num_resources = ARRAY_SIZE(bfin_uart3_resources),
270 .resource = bfin_uart_resources, 433 .resource = bfin_uart3_resources,
434 .dev = {
435 .platform_data = &bfin_uart3_peripherals, /* Passed to driver */
436 },
271}; 437};
272#endif 438#endif
439#endif
273 440
274#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 441#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
275#ifdef CONFIG_BFIN_SIR0 442#ifdef CONFIG_BFIN_SIR0
@@ -464,6 +631,145 @@ static struct platform_device musb_device = {
464}; 631};
465#endif 632#endif
466 633
634#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
635#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
636static struct resource bfin_sport0_uart_resources[] = {
637 {
638 .start = SPORT0_TCR1,
639 .end = SPORT0_MRCS3+4,
640 .flags = IORESOURCE_MEM,
641 },
642 {
643 .start = IRQ_SPORT0_RX,
644 .end = IRQ_SPORT0_RX+1,
645 .flags = IORESOURCE_IRQ,
646 },
647 {
648 .start = IRQ_SPORT0_ERROR,
649 .end = IRQ_SPORT0_ERROR,
650 .flags = IORESOURCE_IRQ,
651 },
652};
653
654unsigned short bfin_sport0_peripherals[] = {
655 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
656 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
657};
658
659static struct platform_device bfin_sport0_uart_device = {
660 .name = "bfin-sport-uart",
661 .id = 0,
662 .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
663 .resource = bfin_sport0_uart_resources,
664 .dev = {
665 .platform_data = &bfin_sport0_peripherals, /* Passed to driver */
666 },
667};
668#endif
669#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
670static struct resource bfin_sport1_uart_resources[] = {
671 {
672 .start = SPORT1_TCR1,
673 .end = SPORT1_MRCS3+4,
674 .flags = IORESOURCE_MEM,
675 },
676 {
677 .start = IRQ_SPORT1_RX,
678 .end = IRQ_SPORT1_RX+1,
679 .flags = IORESOURCE_IRQ,
680 },
681 {
682 .start = IRQ_SPORT1_ERROR,
683 .end = IRQ_SPORT1_ERROR,
684 .flags = IORESOURCE_IRQ,
685 },
686};
687
688unsigned short bfin_sport1_peripherals[] = {
689 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
690 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
691};
692
693static struct platform_device bfin_sport1_uart_device = {
694 .name = "bfin-sport-uart",
695 .id = 1,
696 .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
697 .resource = bfin_sport1_uart_resources,
698 .dev = {
699 .platform_data = &bfin_sport1_peripherals, /* Passed to driver */
700 },
701};
702#endif
703#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
704static struct resource bfin_sport2_uart_resources[] = {
705 {
706 .start = SPORT2_TCR1,
707 .end = SPORT2_MRCS3+4,
708 .flags = IORESOURCE_MEM,
709 },
710 {
711 .start = IRQ_SPORT2_RX,
712 .end = IRQ_SPORT2_RX+1,
713 .flags = IORESOURCE_IRQ,
714 },
715 {
716 .start = IRQ_SPORT2_ERROR,
717 .end = IRQ_SPORT2_ERROR,
718 .flags = IORESOURCE_IRQ,
719 },
720};
721
722unsigned short bfin_sport2_peripherals[] = {
723 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
724 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
725};
726
727static struct platform_device bfin_sport2_uart_device = {
728 .name = "bfin-sport-uart",
729 .id = 2,
730 .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
731 .resource = bfin_sport2_uart_resources,
732 .dev = {
733 .platform_data = &bfin_sport2_peripherals, /* Passed to driver */
734 },
735};
736#endif
737#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
738static struct resource bfin_sport3_uart_resources[] = {
739 {
740 .start = SPORT3_TCR1,
741 .end = SPORT3_MRCS3+4,
742 .flags = IORESOURCE_MEM,
743 },
744 {
745 .start = IRQ_SPORT3_RX,
746 .end = IRQ_SPORT3_RX+1,
747 .flags = IORESOURCE_IRQ,
748 },
749 {
750 .start = IRQ_SPORT3_ERROR,
751 .end = IRQ_SPORT3_ERROR,
752 .flags = IORESOURCE_IRQ,
753 },
754};
755
756unsigned short bfin_sport3_peripherals[] = {
757 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
758 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
759};
760
761static struct platform_device bfin_sport3_uart_device = {
762 .name = "bfin-sport-uart",
763 .id = 3,
764 .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
765 .resource = bfin_sport3_uart_resources,
766 .dev = {
767 .platform_data = &bfin_sport3_peripherals, /* Passed to driver */
768 },
769};
770#endif
771#endif
772
467#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 773#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
468unsigned short bfin_can_peripherals[] = { 774unsigned short bfin_can_peripherals[] = {
469 P_CAN0_RX, P_CAN0_TX, 0 775 P_CAN0_RX, P_CAN0_TX, 0
@@ -657,8 +963,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
657}; 963};
658#endif 964#endif
659 965
660#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 966#if defined(CONFIG_SND_BLACKFIN_AD183X) \
661 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 967 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
662static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 968static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
663 .enable_dma = 0, 969 .enable_dma = 0,
664 .bits_per_word = 16, 970 .bits_per_word = 16,
@@ -714,8 +1020,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
714 .mode = SPI_MODE_3, 1020 .mode = SPI_MODE_3,
715 }, 1021 },
716#endif 1022#endif
717#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 1023#if defined(CONFIG_SND_BLACKFIN_AD183X) \
718 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 1024 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
719 { 1025 {
720 .modalias = "ad1836", 1026 .modalias = "ad1836",
721 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 1027 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -951,6 +1257,30 @@ static struct platform_device bfin_dpmc = {
951 }, 1257 },
952}; 1258};
953 1259
1260#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1261static struct platform_device bfin_i2s = {
1262 .name = "bfin-i2s",
1263 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1264 /* TODO: add platform data here */
1265};
1266#endif
1267
1268#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1269static struct platform_device bfin_tdm = {
1270 .name = "bfin-tdm",
1271 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1272 /* TODO: add platform data here */
1273};
1274#endif
1275
1276#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1277static struct platform_device bfin_ac97 = {
1278 .name = "bfin-ac97",
1279 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1280 /* TODO: add platform data here */
1281};
1282#endif
1283
954static struct platform_device *ezkit_devices[] __initdata = { 1284static struct platform_device *ezkit_devices[] __initdata = {
955 1285
956 &bfin_dpmc, 1286 &bfin_dpmc,
@@ -960,7 +1290,18 @@ static struct platform_device *ezkit_devices[] __initdata = {
960#endif 1290#endif
961 1291
962#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 1292#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
963 &bfin_uart_device, 1293#ifdef CONFIG_SERIAL_BFIN_UART0
1294 &bfin_uart0_device,
1295#endif
1296#ifdef CONFIG_SERIAL_BFIN_UART1
1297 &bfin_uart1_device,
1298#endif
1299#ifdef CONFIG_SERIAL_BFIN_UART2
1300 &bfin_uart2_device,
1301#endif
1302#ifdef CONFIG_SERIAL_BFIN_UART3
1303 &bfin_uart3_device,
1304#endif
964#endif 1305#endif
965 1306
966#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 1307#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -994,6 +1335,21 @@ static struct platform_device *ezkit_devices[] __initdata = {
994 &bfin_isp1760_device, 1335 &bfin_isp1760_device,
995#endif 1336#endif
996 1337
1338#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
1339#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1340 &bfin_sport0_uart_device,
1341#endif
1342#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1343 &bfin_sport1_uart_device,
1344#endif
1345#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
1346 &bfin_sport2_uart_device,
1347#endif
1348#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
1349 &bfin_sport3_uart_device,
1350#endif
1351#endif
1352
997#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 1353#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
998 &bfin_can_device, 1354 &bfin_can_device,
999#endif 1355#endif
@@ -1037,6 +1393,18 @@ static struct platform_device *ezkit_devices[] __initdata = {
1037#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 1393#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
1038 &ezkit_flash_device, 1394 &ezkit_flash_device,
1039#endif 1395#endif
1396
1397#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1398 &bfin_i2s,
1399#endif
1400
1401#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1402 &bfin_tdm,
1403#endif
1404
1405#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1406 &bfin_ac97,
1407#endif
1040}; 1408};
1041 1409
1042static int __init ezkit_init(void) 1410static int __init ezkit_init(void)
@@ -1058,3 +1426,42 @@ static int __init ezkit_init(void)
1058} 1426}
1059 1427
1060arch_initcall(ezkit_init); 1428arch_initcall(ezkit_init);
1429
1430static struct platform_device *ezkit_early_devices[] __initdata = {
1431#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
1432#ifdef CONFIG_SERIAL_BFIN_UART0
1433 &bfin_uart0_device,
1434#endif
1435#ifdef CONFIG_SERIAL_BFIN_UART1
1436 &bfin_uart1_device,
1437#endif
1438#ifdef CONFIG_SERIAL_BFIN_UART2
1439 &bfin_uart2_device,
1440#endif
1441#ifdef CONFIG_SERIAL_BFIN_UART3
1442 &bfin_uart3_device,
1443#endif
1444#endif
1445
1446#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
1447#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
1448 &bfin_sport0_uart_device,
1449#endif
1450#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
1451 &bfin_sport1_uart_device,
1452#endif
1453#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
1454 &bfin_sport2_uart_device,
1455#endif
1456#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
1457 &bfin_sport3_uart_device,
1458#endif
1459#endif
1460};
1461
1462void __init native_machine_early_platform_add_devices(void)
1463{
1464 printk(KERN_INFO "register early platform devices\n");
1465 early_platform_add_devices(ezkit_early_devices,
1466 ARRAY_SIZE(ezkit_early_devices));
1467}
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 106db05684ae..1f99b51a3d56 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -317,7 +317,8 @@ Events (highest priority) EMU 0
317 317
318#define GPIO_IRQ_BASE IRQ_PA0 318#define GPIO_IRQ_BASE IRQ_PA0
319 319
320#define NR_IRQS (IRQ_PJ15+1) 320#define NR_MACH_IRQS (IRQ_PJ15 + 1)
321#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
321 322
322/* For compatibility reasons with existing code */ 323/* For compatibility reasons with existing code */
323 324
diff --git a/arch/blackfin/mach-bf561/Makefile b/arch/blackfin/mach-bf561/Makefile
index 59e18afe28c6..b34029718318 100644
--- a/arch/blackfin/mach-bf561/Makefile
+++ b/arch/blackfin/mach-bf561/Makefile
@@ -6,3 +6,4 @@ obj-y := ints-priority.o dma.o
6 6
7obj-$(CONFIG_BF561_COREB) += coreb.o 7obj-$(CONFIG_BF561_COREB) += coreb.o
8obj-$(CONFIG_SMP) += smp.o secondary.o atomic.o 8obj-$(CONFIG_SMP) += smp.o secondary.o atomic.o
9obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 07e8dc8770da..5163e2c383c5 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -176,7 +176,7 @@ static struct resource smsc911x_resources[] = {
176}; 176};
177 177
178static struct smsc911x_platform_config smsc911x_config = { 178static struct smsc911x_platform_config smsc911x_config = {
179 .flags = SMSC911X_USE_32BIT, 179 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
180 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 180 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
181 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, 181 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
182 .phy_interface = PHY_INTERFACE_MODE_MII, 182 .phy_interface = PHY_INTERFACE_MODE_MII,
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index dfc8d5b77986..e127aedc1d7f 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -72,7 +72,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
72}; 72};
73#endif 73#endif
74 74
75#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 75#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
76static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 76static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
77 .enable_dma = 0, 77 .enable_dma = 0,
78 .bits_per_word = 16, 78 .bits_per_word = 16,
@@ -111,7 +111,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
111 }, 111 },
112#endif 112#endif
113 113
114#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 114#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
115 { 115 {
116 .modalias = "ad1836", 116 .modalias = "ad1836",
117 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 117 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -305,21 +305,50 @@ static struct platform_device isp1362_hcd_device = {
305#endif 305#endif
306 306
307#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 307#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
308static struct resource bfin_uart_resources[] = { 308#ifdef CONFIG_SERIAL_BFIN_UART0
309static struct resource bfin_uart0_resources[] = {
309 { 310 {
310 .start = 0xFFC00400, 311 .start = BFIN_UART_THR,
311 .end = 0xFFC004FF, 312 .end = BFIN_UART_GCTL+2,
312 .flags = IORESOURCE_MEM, 313 .flags = IORESOURCE_MEM,
313 }, 314 },
315 {
316 .start = IRQ_UART_RX,
317 .end = IRQ_UART_RX+1,
318 .flags = IORESOURCE_IRQ,
319 },
320 {
321 .start = IRQ_UART_ERROR,
322 .end = IRQ_UART_ERROR,
323 .flags = IORESOURCE_IRQ,
324 },
325 {
326 .start = CH_UART_TX,
327 .end = CH_UART_TX,
328 .flags = IORESOURCE_DMA,
329 },
330 {
331 .start = CH_UART_RX,
332 .end = CH_UART_RX,
333 .flags = IORESOURCE_DMA,
334 },
314}; 335};
315 336
316static struct platform_device bfin_uart_device = { 337unsigned short bfin_uart0_peripherals[] = {
338 P_UART0_TX, P_UART0_RX, 0
339};
340
341static struct platform_device bfin_uart0_device = {
317 .name = "bfin-uart", 342 .name = "bfin-uart",
318 .id = 1, 343 .id = 0,
319 .num_resources = ARRAY_SIZE(bfin_uart_resources), 344 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
320 .resource = bfin_uart_resources, 345 .resource = bfin_uart0_resources,
346 .dev = {
347 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
348 },
321}; 349};
322#endif 350#endif
351#endif
323 352
324#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 353#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
325#ifdef CONFIG_BFIN_SIR0 354#ifdef CONFIG_BFIN_SIR0
@@ -463,7 +492,9 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
463#endif 492#endif
464 493
465#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 494#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
466 &bfin_uart_device, 495#ifdef CONFIG_SERIAL_BFIN_UART0
496 &bfin_uart0_device,
497#endif
467#endif 498#endif
468 499
469#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 500#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -516,3 +547,18 @@ static int __init cm_bf561_init(void)
516} 547}
517 548
518arch_initcall(cm_bf561_init); 549arch_initcall(cm_bf561_init);
550
551static struct platform_device *cm_bf561_early_devices[] __initdata = {
552#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
553#ifdef CONFIG_SERIAL_BFIN_UART0
554 &bfin_uart0_device,
555#endif
556#endif
557};
558
559void __init native_machine_early_platform_add_devices(void)
560{
561 printk(KERN_INFO "register early platform devices\n");
562 early_platform_add_devices(cm_bf561_early_devices,
563 ARRAY_SIZE(cm_bf561_early_devices));
564}
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index ffd3e6a80d1a..9b93e2f95791 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -160,21 +160,50 @@ static struct platform_device smc91x_device = {
160#endif 160#endif
161 161
162#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 162#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
163static struct resource bfin_uart_resources[] = { 163#ifdef CONFIG_SERIAL_BFIN_UART0
164static struct resource bfin_uart0_resources[] = {
164 { 165 {
165 .start = 0xFFC00400, 166 .start = BFIN_UART_THR,
166 .end = 0xFFC004FF, 167 .end = BFIN_UART_GCTL+2,
167 .flags = IORESOURCE_MEM, 168 .flags = IORESOURCE_MEM,
168 }, 169 },
170 {
171 .start = IRQ_UART_RX,
172 .end = IRQ_UART_RX+1,
173 .flags = IORESOURCE_IRQ,
174 },
175 {
176 .start = IRQ_UART_ERROR,
177 .end = IRQ_UART_ERROR,
178 .flags = IORESOURCE_IRQ,
179 },
180 {
181 .start = CH_UART_TX,
182 .end = CH_UART_TX,
183 .flags = IORESOURCE_DMA,
184 },
185 {
186 .start = CH_UART_RX,
187 .end = CH_UART_RX,
188 .flags = IORESOURCE_DMA,
189 },
169}; 190};
170 191
171static struct platform_device bfin_uart_device = { 192unsigned short bfin_uart0_peripherals[] = {
193 P_UART0_TX, P_UART0_RX, 0
194};
195
196static struct platform_device bfin_uart0_device = {
172 .name = "bfin-uart", 197 .name = "bfin-uart",
173 .id = 1, 198 .id = 0,
174 .num_resources = ARRAY_SIZE(bfin_uart_resources), 199 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
175 .resource = bfin_uart_resources, 200 .resource = bfin_uart0_resources,
201 .dev = {
202 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
203 },
176}; 204};
177#endif 205#endif
206#endif
178 207
179#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 208#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
180#ifdef CONFIG_BFIN_SIR0 209#ifdef CONFIG_BFIN_SIR0
@@ -245,8 +274,8 @@ static struct platform_device ezkit_flash_device = {
245}; 274};
246#endif 275#endif
247 276
248#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 277#if defined(CONFIG_SND_BLACKFIN_AD183X) \
249 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 278 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
250static struct bfin5xx_spi_chip ad1836_spi_chip_info = { 279static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
251 .enable_dma = 0, 280 .enable_dma = 0,
252 .bits_per_word = 16, 281 .bits_per_word = 16,
@@ -299,8 +328,8 @@ static struct platform_device bfin_spi0_device = {
299#endif 328#endif
300 329
301static struct spi_board_info bfin_spi_board_info[] __initdata = { 330static struct spi_board_info bfin_spi_board_info[] __initdata = {
302#if defined(CONFIG_SND_BLACKFIN_AD1836) \ 331#if defined(CONFIG_SND_BLACKFIN_AD183X) \
303 || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) 332 || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
304 { 333 {
305 .modalias = "ad1836", 334 .modalias = "ad1836",
306 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 335 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
@@ -412,7 +441,9 @@ static struct platform_device *ezkit_devices[] __initdata = {
412#endif 441#endif
413 442
414#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) 443#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
415 &bfin_uart_device, 444#ifdef CONFIG_SERIAL_BFIN_UART0
445 &bfin_uart0_device,
446#endif
416#endif 447#endif
417 448
418#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 449#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
@@ -458,3 +489,18 @@ static int __init ezkit_init(void)
458} 489}
459 490
460arch_initcall(ezkit_init); 491arch_initcall(ezkit_init);
492
493static struct platform_device *ezkit_early_devices[] __initdata = {
494#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
495#ifdef CONFIG_SERIAL_BFIN_UART0
496 &bfin_uart0_device,
497#endif
498#endif
499};
500
501void __init native_machine_early_platform_add_devices(void)
502{
503 printk(KERN_INFO "register early platform devices\n");
504 early_platform_add_devices(ezkit_early_devices,
505 ARRAY_SIZE(ezkit_early_devices));
506}
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index 8ba7252455e1..d3017e53686b 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -42,6 +42,52 @@ static struct platform_device smc91x_device = {
42 .resource = smc91x_resources, 42 .resource = smc91x_resources,
43}; 43};
44 44
45#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
46#ifdef CONFIG_SERIAL_BFIN_UART0
47static struct resource bfin_uart0_resources[] = {
48 {
49 .start = BFIN_UART_THR,
50 .end = BFIN_UART_GCTL+2,
51 .flags = IORESOURCE_MEM,
52 },
53 {
54 .start = IRQ_UART_RX,
55 .end = IRQ_UART_RX+1,
56 .flags = IORESOURCE_IRQ,
57 },
58 {
59 .start = IRQ_UART_ERROR,
60 .end = IRQ_UART_ERROR,
61 .flags = IORESOURCE_IRQ,
62 },
63 {
64 .start = CH_UART_TX,
65 .end = CH_UART_TX,
66 .flags = IORESOURCE_DMA,
67 },
68 {
69 .start = CH_UART_RX,
70 .end = CH_UART_RX,
71 .flags = IORESOURCE_DMA,
72 },
73};
74
75unsigned short bfin_uart0_peripherals[] = {
76 P_UART0_TX, P_UART0_RX, 0
77};
78
79static struct platform_device bfin_uart0_device = {
80 .name = "bfin-uart",
81 .id = 0,
82 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
83 .resource = bfin_uart0_resources,
84 .dev = {
85 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
86 },
87};
88#endif
89#endif
90
45#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 91#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
46#ifdef CONFIG_BFIN_SIR0 92#ifdef CONFIG_BFIN_SIR0
47static struct resource bfin_sir0_resources[] = { 93static struct resource bfin_sir0_resources[] = {
@@ -73,6 +119,13 @@ static struct platform_device bfin_sir0_device = {
73 119
74static struct platform_device *tepla_devices[] __initdata = { 120static struct platform_device *tepla_devices[] __initdata = {
75 &smc91x_device, 121 &smc91x_device,
122
123#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
124#ifdef CONFIG_SERIAL_BFIN_UART0
125 &bfin_uart0_device,
126#endif
127#endif
128
76#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) 129#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
77#ifdef CONFIG_BFIN_SIR0 130#ifdef CONFIG_BFIN_SIR0
78 &bfin_sir0_device, 131 &bfin_sir0_device,
@@ -87,3 +140,18 @@ static int __init tepla_init(void)
87} 140}
88 141
89arch_initcall(tepla_init); 142arch_initcall(tepla_init);
143
144static struct platform_device *tepla_early_devices[] __initdata = {
145#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
146#ifdef CONFIG_SERIAL_BFIN_UART0
147 &bfin_uart0_device,
148#endif
149#endif
150};
151
152void __init native_machine_early_platform_add_devices(void)
153{
154 printk(KERN_INFO "register early platform devices\n");
155 early_platform_add_devices(tepla_early_devices,
156 ARRAY_SIZE(tepla_early_devices));
157}
diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c
new file mode 100644
index 000000000000..c95169b612dc
--- /dev/null
+++ b/arch/blackfin/mach-bf561/hotplug.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2007-2009 Analog Devices Inc.
3 * Graff Yang <graf.yang@analog.com>
4 *
5 * Licensed under the GPL-2 or later.
6 */
7
8#include <asm/blackfin.h>
9#include <asm/smp.h>
10#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
11
12int hotplug_coreb;
13
14void platform_cpu_die(void)
15{
16 unsigned long iwr[2] = {0, 0};
17 unsigned long bank = SIC_SYSIRQ(IRQ_SUPPLE_0) / 32;
18 unsigned long bit = 1 << (SIC_SYSIRQ(IRQ_SUPPLE_0) % 32);
19
20 hotplug_coreb = 1;
21
22 iwr[bank] = bit;
23
24 /* disable core timer */
25 bfin_write_TCNTL(0);
26
27 /* clear ipi interrupt IRQ_SUPPLE_0 */
28 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + 1)));
29 SSYNC();
30
31 coreb_sleep(iwr[0], iwr[1], 0);
32}
diff --git a/arch/blackfin/mach-bf561/include/mach/irq.h b/arch/blackfin/mach-bf561/include/mach/irq.h
index 7b208db267bf..c95566ade51b 100644
--- a/arch/blackfin/mach-bf561/include/mach/irq.h
+++ b/arch/blackfin/mach-bf561/include/mach/irq.h
@@ -265,7 +265,8 @@
265 265
266#define GPIO_IRQ_BASE IRQ_PF0 266#define GPIO_IRQ_BASE IRQ_PF0
267 267
268#define NR_IRQS (IRQ_PF47 + 1) 268#define NR_MACH_IRQS (IRQ_PF47 + 1)
269#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
269 270
270#define IVG7 7 271#define IVG7 7
271#define IVG8 8 272#define IVG8 8
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
index 390c7f4ae7b3..2c8c514dd386 100644
--- a/arch/blackfin/mach-bf561/include/mach/smp.h
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -25,4 +25,6 @@ void platform_send_ipi_cpu(unsigned int cpu);
25 25
26void platform_clear_ipi(unsigned int cpu); 26void platform_clear_ipi(unsigned int cpu);
27 27
28void bfin_local_timer_setup(void);
29
28#endif /* !_MACH_BF561_SMP */ 30#endif /* !_MACH_BF561_SMP */
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
index 8e6050369c06..4624eebbf9c4 100644
--- a/arch/blackfin/mach-bf561/secondary.S
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/blackfin.h> 12#include <asm/blackfin.h>
13#include <asm/asm-offsets.h> 13#include <asm/asm-offsets.h>
14#include <asm/trace.h>
14 15
15__INIT 16__INIT
16 17
@@ -62,6 +63,8 @@ ENTRY(_coreb_trampoline_start)
62 M2 = r0; 63 M2 = r0;
63 M3 = r0; 64 M3 = r0;
64 65
66 trace_buffer_init(p0,r0);
67
65 /* Turn off the icache */ 68 /* Turn off the icache */
66 p0.l = LO(IMEM_CONTROL); 69 p0.l = LO(IMEM_CONTROL);
67 p0.h = HI(IMEM_CONTROL); 70 p0.h = HI(IMEM_CONTROL);
@@ -159,6 +162,41 @@ ENTRY(_coreb_trampoline_start)
159ENDPROC(_coreb_trampoline_start) 162ENDPROC(_coreb_trampoline_start)
160ENTRY(_coreb_trampoline_end) 163ENTRY(_coreb_trampoline_end)
161 164
165.section ".text"
166ENTRY(_set_sicb_iwr)
167 P0.H = hi(SICB_IWR0);
168 P0.L = lo(SICB_IWR0);
169 P1.H = hi(SICB_IWR1);
170 P1.L = lo(SICB_IWR1);
171 [P0] = R0;
172 [P1] = R1;
173 SSYNC;
174 RTS;
175ENDPROC(_set_sicb_iwr)
176
177ENTRY(_coreb_sleep)
178 sp.l = lo(INITIAL_STACK);
179 sp.h = hi(INITIAL_STACK);
180 fp = sp;
181 usp = sp;
182
183 call _set_sicb_iwr;
184
185 CLI R2;
186 SSYNC;
187 IDLE;
188 STI R2;
189
190 R0 = IWR_DISABLE_ALL;
191 R1 = IWR_DISABLE_ALL;
192 call _set_sicb_iwr;
193
194 p0.h = hi(COREB_L1_CODE_START);
195 p0.l = lo(COREB_L1_CODE_START);
196 jump (p0);
197ENDPROC(_coreb_sleep)
198
199__CPUINIT
162ENTRY(_coreb_start) 200ENTRY(_coreb_start)
163 [--sp] = reti; 201 [--sp] = reti;
164 202
@@ -176,12 +214,20 @@ ENTRY(_coreb_start)
176 sp = [p0]; 214 sp = [p0];
177 usp = sp; 215 usp = sp;
178 fp = sp; 216 fp = sp;
217#ifdef CONFIG_HOTPLUG_CPU
218 p0.l = _hotplug_coreb;
219 p0.h = _hotplug_coreb;
220 r0 = [p0];
221 cc = BITTST(r0, 0);
222 if cc jump 3f;
223#endif
179 sp += -12; 224 sp += -12;
180 call _init_pda 225 call _init_pda
181 sp += 12; 226 sp += 12;
227#ifdef CONFIG_HOTPLUG_CPU
2283:
229#endif
182 call _secondary_start_kernel; 230 call _secondary_start_kernel;
183.L_exit: 231.L_exit:
184 jump.s .L_exit; 232 jump.s .L_exit;
185ENDPROC(_coreb_start) 233ENDPROC(_coreb_start)
186
187__FINIT
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 0192532e96a2..3b9a4bf7dacc 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -11,11 +11,10 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include <asm/dma.h> 13#include <asm/dma.h>
14#include <asm/time.h>
14 15
15static DEFINE_SPINLOCK(boot_lock); 16static DEFINE_SPINLOCK(boot_lock);
16 17
17static cpumask_t cpu_callin_map;
18
19/* 18/*
20 * platform_init_cpus() - Tell the world about how many cores we 19 * platform_init_cpus() - Tell the world about how many cores we
21 * have. This is called while setting up the architecture support 20 * have. This is called while setting up the architecture support
@@ -66,13 +65,15 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
66 bfin_write_SICB_IAR5(bfin_read_SICA_IAR5()); 65 bfin_write_SICB_IAR5(bfin_read_SICA_IAR5());
67 bfin_write_SICB_IAR6(bfin_read_SICA_IAR6()); 66 bfin_write_SICB_IAR6(bfin_read_SICA_IAR6());
68 bfin_write_SICB_IAR7(bfin_read_SICA_IAR7()); 67 bfin_write_SICB_IAR7(bfin_read_SICA_IAR7());
68 bfin_write_SICB_IWR0(IWR_DISABLE_ALL);
69 bfin_write_SICB_IWR1(IWR_DISABLE_ALL);
69 SSYNC(); 70 SSYNC();
70 71
71 /* Store CPU-private information to the cpu_data array. */ 72 /* Store CPU-private information to the cpu_data array. */
72 bfin_setup_cpudata(cpu); 73 bfin_setup_cpudata(cpu);
73 74
74 /* We are done with local CPU inits, unblock the boot CPU. */ 75 /* We are done with local CPU inits, unblock the boot CPU. */
75 cpu_set(cpu, cpu_callin_map); 76 set_cpu_online(cpu, true);
76 spin_lock(&boot_lock); 77 spin_lock(&boot_lock);
77 spin_unlock(&boot_lock); 78 spin_unlock(&boot_lock);
78} 79}
@@ -81,28 +82,28 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
81{ 82{
82 unsigned long timeout; 83 unsigned long timeout;
83 84
84 /* CoreB already running?! */
85 BUG_ON((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0);
86
87 printk(KERN_INFO "Booting Core B.\n"); 85 printk(KERN_INFO "Booting Core B.\n");
88 86
89 spin_lock(&boot_lock); 87 spin_lock(&boot_lock);
90 88
91 /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ 89 if ((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0) {
92 SSYNC(); 90 /* CoreB already running, sending ipi to wakeup it */
93 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); 91 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
94 SSYNC(); 92 } else {
93 /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
94 bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT);
95 SSYNC();
96 }
95 97
96 timeout = jiffies + 1 * HZ; 98 timeout = jiffies + 1 * HZ;
97 while (time_before(jiffies, timeout)) { 99 while (time_before(jiffies, timeout)) {
98 if (cpu_isset(cpu, cpu_callin_map)) 100 if (cpu_online(cpu))
99 break; 101 break;
100 udelay(100); 102 udelay(100);
101 barrier(); 103 barrier();
102 } 104 }
103 105
104 if (cpu_isset(cpu, cpu_callin_map)) { 106 if (cpu_online(cpu)) {
105 cpu_set(cpu, cpu_online_map);
106 /* release the lock and let coreb run */ 107 /* release the lock and let coreb run */
107 spin_unlock(&boot_lock); 108 spin_unlock(&boot_lock);
108 return 0; 109 return 0;
@@ -147,3 +148,20 @@ void platform_clear_ipi(unsigned int cpu)
147 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu))); 148 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu)));
148 SSYNC(); 149 SSYNC();
149} 150}
151
152/*
153 * Setup core B's local core timer.
154 * In SMP, core timer is used for clock event device.
155 */
156void __cpuinit bfin_local_timer_setup(void)
157{
158#if defined(CONFIG_TICKSOURCE_CORETMR)
159 bfin_coretmr_init();
160 bfin_coretmr_clockevent_init();
161 get_irq_chip(IRQ_CORETMR)->unmask(IRQ_CORETMR);
162#else
163 /* Power down the core timer, just to play safe. */
164 bfin_write_TCNTL(0);
165#endif
166
167}
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 777582897253..4391d03dc845 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -11,10 +11,13 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/cpufreq.h> 12#include <linux/cpufreq.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/delay.h>
14#include <asm/blackfin.h> 15#include <asm/blackfin.h>
15#include <asm/time.h> 16#include <asm/time.h>
16#include <asm/dpmc.h> 17#include <asm/dpmc.h>
17 18
19#define CPUFREQ_CPU 0
20
18/* this is the table of CCLK frequencies, in Hz */ 21/* this is the table of CCLK frequencies, in Hz */
19/* .index is the entry in the auxillary dpm_state_table[] */ 22/* .index is the entry in the auxillary dpm_state_table[] */
20static struct cpufreq_frequency_table bfin_freq_table[] = { 23static struct cpufreq_frequency_table bfin_freq_table[] = {
@@ -41,64 +44,124 @@ static struct bfin_dpm_state {
41 unsigned int tscale; /* change the divider on the core timer interrupt */ 44 unsigned int tscale; /* change the divider on the core timer interrupt */
42} dpm_state_table[3]; 45} dpm_state_table[3];
43 46
47#if defined(CONFIG_CYCLES_CLOCKSOURCE)
44/* 48/*
45 normalized to maximum frequncy offset for CYCLES, 49 * normalized to maximum frequncy offset for CYCLES,
46 used in time-ts cycles clock source, but could be used 50 * used in time-ts cycles clock source, but could be used
47 somewhere also. 51 * somewhere also.
48 */ 52 */
49unsigned long long __bfin_cycles_off; 53unsigned long long __bfin_cycles_off;
50unsigned int __bfin_cycles_mod; 54unsigned int __bfin_cycles_mod;
55#endif
51 56
52/**************************************************************************/ 57/**************************************************************************/
58static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
59{
53 60
54static unsigned int bfin_getfreq_khz(unsigned int cpu) 61 unsigned long csel, min_cclk;
62 int index;
63
64 /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
65#if ANOMALY_05000273 || ANOMALY_05000274 || \
66 (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
67 min_cclk = sclk * 2;
68#else
69 min_cclk = sclk;
70#endif
71 csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
72
73 for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
74 bfin_freq_table[index].frequency = cclk >> index;
75 dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
76 dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
77
78 pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
79 bfin_freq_table[index].frequency,
80 dpm_state_table[index].csel,
81 dpm_state_table[index].tscale);
82 }
83 return;
84}
85
86static void bfin_adjust_core_timer(void *info)
55{ 87{
56 /* The driver only support single cpu */ 88 unsigned int tscale;
57 if (cpu != 0) 89 unsigned int index = *(unsigned int *)info;
58 return -1;
59 90
60 return get_cclk() / 1000; 91 /* we have to adjust the core timer, because it is using cclk */
92 tscale = dpm_state_table[index].tscale;
93 bfin_write_TSCALE(tscale);
94 return;
61} 95}
62 96
97static unsigned int bfin_getfreq_khz(unsigned int cpu)
98{
99 /* Both CoreA/B have the same core clock */
100 return get_cclk() / 1000;
101}
63 102
64static int bfin_target(struct cpufreq_policy *policy, 103static int bfin_target(struct cpufreq_policy *poli,
65 unsigned int target_freq, unsigned int relation) 104 unsigned int target_freq, unsigned int relation)
66{ 105{
67 unsigned int index, plldiv, tscale; 106 unsigned int index, plldiv, cpu;
68 unsigned long flags, cclk_hz; 107 unsigned long flags, cclk_hz;
69 struct cpufreq_freqs freqs; 108 struct cpufreq_freqs freqs;
109 static unsigned long lpj_ref;
110 static unsigned int lpj_ref_freq;
111
112#if defined(CONFIG_CYCLES_CLOCKSOURCE)
70 cycles_t cycles; 113 cycles_t cycles;
114#endif
71 115
72 if (cpufreq_frequency_table_target(policy, bfin_freq_table, 116 for_each_online_cpu(cpu) {
73 target_freq, relation, &index)) 117 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
74 return -EINVAL; 118
75 119 if (!policy)
76 cclk_hz = bfin_freq_table[index].frequency; 120 continue;
77 121
78 freqs.old = bfin_getfreq_khz(0); 122 if (cpufreq_frequency_table_target(policy, bfin_freq_table,
79 freqs.new = cclk_hz; 123 target_freq, relation, &index))
80 freqs.cpu = 0; 124 return -EINVAL;
81 125
82 pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", 126 cclk_hz = bfin_freq_table[index].frequency;
83 cclk_hz, target_freq, freqs.old); 127
84 128 freqs.old = bfin_getfreq_khz(0);
85 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 129 freqs.new = cclk_hz;
86 local_irq_save_hw(flags); 130 freqs.cpu = cpu;
87 plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; 131
88 tscale = dpm_state_table[index].tscale; 132 pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
89 bfin_write_PLL_DIV(plldiv); 133 cclk_hz, target_freq, freqs.old);
90 /* we have to adjust the core timer, because it is using cclk */ 134
91 bfin_write_TSCALE(tscale); 135 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
92 cycles = get_cycles(); 136 if (cpu == CPUFREQ_CPU) {
93 SSYNC(); 137 local_irq_save_hw(flags);
94 cycles += 10; /* ~10 cycles we lose after get_cycles() */ 138 plldiv = (bfin_read_PLL_DIV() & SSEL) |
95 __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); 139 dpm_state_table[index].csel;
96 __bfin_cycles_mod = index; 140 bfin_write_PLL_DIV(plldiv);
97 local_irq_restore_hw(flags); 141 on_each_cpu(bfin_adjust_core_timer, &index, 1);
98 /* TODO: just test case for cycles clock source, remove later */ 142#if defined(CONFIG_CYCLES_CLOCKSOURCE)
99 pr_debug("cpufreq: done\n"); 143 cycles = get_cycles();
100 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 144 SSYNC();
145 cycles += 10; /* ~10 cycles we lose after get_cycles() */
146 __bfin_cycles_off +=
147 (cycles << __bfin_cycles_mod) - (cycles << index);
148 __bfin_cycles_mod = index;
149#endif
150 if (!lpj_ref_freq) {
151 lpj_ref = loops_per_jiffy;
152 lpj_ref_freq = freqs.old;
153 }
154 if (freqs.new != freqs.old) {
155 loops_per_jiffy = cpufreq_scale(lpj_ref,
156 lpj_ref_freq, freqs.new);
157 }
158 local_irq_restore_hw(flags);
159 }
160 /* TODO: just test case for cycles clock source, remove later */
161 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
162 }
101 163
164 pr_debug("cpufreq: done\n");
102 return 0; 165 return 0;
103} 166}
104 167
@@ -110,37 +173,16 @@ static int bfin_verify_speed(struct cpufreq_policy *policy)
110static int __init __bfin_cpu_init(struct cpufreq_policy *policy) 173static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
111{ 174{
112 175
113 unsigned long cclk, sclk, csel, min_cclk; 176 unsigned long cclk, sclk;
114 int index;
115
116 if (policy->cpu != 0)
117 return -EINVAL;
118 177
119 cclk = get_cclk() / 1000; 178 cclk = get_cclk() / 1000;
120 sclk = get_sclk() / 1000; 179 sclk = get_sclk() / 1000;
121 180
122#if ANOMALY_05000273 || ANOMALY_05000274 || \ 181 if (policy->cpu == CPUFREQ_CPU)
123 (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) 182 bfin_init_tables(cclk, sclk);
124 min_cclk = sclk * 2;
125#else
126 min_cclk = sclk;
127#endif
128 csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
129
130 for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
131 bfin_freq_table[index].frequency = cclk >> index;
132 dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
133 dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
134
135 pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
136 bfin_freq_table[index].frequency,
137 dpm_state_table[index].csel,
138 dpm_state_table[index].tscale);
139 }
140 183
141 policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ 184 policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
142 185
143 /*Now ,only support one cpu */
144 policy->cur = cclk; 186 policy->cur = cclk;
145 cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); 187 cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
146 return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); 188 return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 01b2f58dfb95..a5847f5d67c7 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -405,7 +405,7 @@ ENTRY(_double_fault)
405 405
406 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ 406 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
407 SP += -12; 407 SP += -12;
408 call _double_fault_c; 408 pseudo_long_call _double_fault_c, p5;
409 SP += 12; 409 SP += 12;
410.L_double_fault_panic: 410.L_double_fault_panic:
411 JUMP .L_double_fault_panic 411 JUMP .L_double_fault_panic
@@ -447,7 +447,7 @@ ENTRY(_exception_to_level5)
447 447
448 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ 448 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
449 SP += -12; 449 SP += -12;
450 call _trap_c; 450 pseudo_long_call _trap_c, p4;
451 SP += 12; 451 SP += 12;
452 452
453 /* If interrupts were off during the exception (IPEND[4] = 1), turn them off 453 /* If interrupts were off during the exception (IPEND[4] = 1), turn them off
@@ -482,6 +482,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
482 [--sp] = ASTAT; 482 [--sp] = ASTAT;
483 [--sp] = (R7:6,P5:4); 483 [--sp] = (R7:6,P5:4);
484 484
485 ANOMALY_283_315_WORKAROUND(p5, r7)
486
485#ifdef CONFIG_EXACT_HWERR 487#ifdef CONFIG_EXACT_HWERR
486 /* Make sure all pending read/writes complete. This will ensure any 488 /* Make sure all pending read/writes complete. This will ensure any
487 * accesses which could cause hardware errors completes, and signal 489 * accesses which could cause hardware errors completes, and signal
@@ -492,8 +494,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
492 ssync; 494 ssync;
493#endif 495#endif
494 496
495 ANOMALY_283_315_WORKAROUND(p5, r7)
496
497#ifdef CONFIG_DEBUG_DOUBLEFAULT 497#ifdef CONFIG_DEBUG_DOUBLEFAULT
498 /* 498 /*
499 * Save these registers, as they are only valid in exception context 499 * Save these registers, as they are only valid in exception context
@@ -551,7 +551,7 @@ ENTRY(_kernel_execve)
551 p0 = sp; 551 p0 = sp;
552 sp += -16; 552 sp += -16;
553 [sp + 12] = p0; 553 [sp + 12] = p0;
554 call _do_execve; 554 pseudo_long_call _do_execve, p5;
555 SP += 16; 555 SP += 16;
556 cc = r0 == 0; 556 cc = r0 == 0;
557 if ! cc jump .Lexecve_failed; 557 if ! cc jump .Lexecve_failed;
@@ -626,13 +626,6 @@ ENTRY(_system_call)
626 p0 = [sp + PT_ORIG_P0]; 626 p0 = [sp + PT_ORIG_P0];
627#endif /* CONFIG_IPIPE */ 627#endif /* CONFIG_IPIPE */
628 628
629 /* Check the System Call */
630 r7 = __NR_syscall;
631 /* System call number is passed in P0 */
632 r6 = p0;
633 cc = r6 < r7;
634 if ! cc jump .Lbadsys;
635
636 /* are we tracing syscalls?*/ 629 /* are we tracing syscalls?*/
637 r7 = sp; 630 r7 = sp;
638 r6.l = lo(ALIGN_PAGE_MASK); 631 r6.l = lo(ALIGN_PAGE_MASK);
@@ -642,6 +635,14 @@ ENTRY(_system_call)
642 r7 = [p2+TI_FLAGS]; 635 r7 = [p2+TI_FLAGS];
643 CC = BITTST(r7,TIF_SYSCALL_TRACE); 636 CC = BITTST(r7,TIF_SYSCALL_TRACE);
644 if CC JUMP _sys_trace; 637 if CC JUMP _sys_trace;
638 CC = BITTST(r7,TIF_SINGLESTEP);
639 if CC JUMP _sys_trace;
640
641 /* Make sure the system call # is valid */
642 p4 = __NR_syscall;
643 /* System call number is passed in P0 */
644 cc = p4 <= p0;
645 if cc jump .Lbadsys;
645 646
646 /* Execute the appropriate system call */ 647 /* Execute the appropriate system call */
647 648
@@ -704,7 +705,7 @@ ENTRY(_system_call)
704 sp += 4; 705 sp += 4;
705 706
706 SP += -12; 707 SP += -12;
707 call _schedule; 708 pseudo_long_call _schedule, p4;
708 SP += 12; 709 SP += 12;
709 710
710 jump .Lresume_userspace_1; 711 jump .Lresume_userspace_1;
@@ -723,7 +724,7 @@ ENTRY(_system_call)
723 724
724 r0 = sp; 725 r0 = sp;
725 SP += -12; 726 SP += -12;
726 call _do_notify_resume; 727 pseudo_long_call _do_notify_resume, p5;
727 SP += 12; 728 SP += 12;
728 729
729.Lsyscall_really_exit: 730.Lsyscall_really_exit:
@@ -736,11 +737,17 @@ ENDPROC(_system_call)
736 * this symbol need not be global anyways, so ... 737 * this symbol need not be global anyways, so ...
737 */ 738 */
738_sys_trace: 739_sys_trace:
739 call _syscall_trace; 740 r0 = sp;
740 741 pseudo_long_call _syscall_trace_enter, p5;
741 /* Execute the appropriate system call */
742 742
743 /* Make sure the system call # is valid */
743 p4 = [SP + PT_P0]; 744 p4 = [SP + PT_P0];
745 p3 = __NR_syscall;
746 cc = p3 <= p4;
747 r0 = -ENOSYS;
748 if cc jump .Lsys_trace_badsys;
749
750 /* Execute the appropriate system call */
744 p5.l = _sys_call_table; 751 p5.l = _sys_call_table;
745 p5.h = _sys_call_table; 752 p5.h = _sys_call_table;
746 p5 = p5 + (p4 << 2); 753 p5 = p5 + (p4 << 2);
@@ -758,9 +765,11 @@ _sys_trace:
758 SP += -12; 765 SP += -12;
759 call (p5); 766 call (p5);
760 SP += 24; 767 SP += 24;
768.Lsys_trace_badsys:
761 [sp + PT_R0] = r0; 769 [sp + PT_R0] = r0;
762 770
763 call _syscall_trace; 771 r0 = sp;
772 pseudo_long_call _syscall_trace_leave, p5;
764 jump .Lresume_userspace; 773 jump .Lresume_userspace;
765ENDPROC(_sys_trace) 774ENDPROC(_sys_trace)
766 775
@@ -966,6 +975,13 @@ ENTRY(_evt_evt14)
966#else 975#else
967 cli r0; 976 cli r0;
968#endif 977#endif
978#ifdef CONFIG_TRACE_IRQFLAGS
979 [--sp] = rets;
980 sp += -12;
981 call _trace_hardirqs_off;
982 sp += 12;
983 rets = [sp++];
984#endif
969 [--sp] = RETI; 985 [--sp] = RETI;
970 SP += 4; 986 SP += 4;
971 rts; 987 rts;
@@ -989,6 +1005,14 @@ ENTRY(_schedule_and_signal_from_int)
989 p1 = rets; 1005 p1 = rets;
990 [sp + PT_RESERVED] = p1; 1006 [sp + PT_RESERVED] = p1;
991 1007
1008#ifdef CONFIG_TRACE_IRQFLAGS
1009 /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
1010 * is turned on, so disable all irqs. */
1011 cli r0;
1012 sp += -12;
1013 call _trace_hardirqs_on;
1014 sp += 12;
1015#endif
992#ifdef CONFIG_SMP 1016#ifdef CONFIG_SMP
993 GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */ 1017 GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
994 r0 = [p0 + PDA_IRQFLAGS]; 1018 r0 = [p0 + PDA_IRQFLAGS];
@@ -1007,7 +1031,8 @@ ENTRY(_schedule_and_signal_from_int)
1007 1031
1008 r0 = sp; 1032 r0 = sp;
1009 sp += -12; 1033 sp += -12;
1010 call _finish_atomic_sections; 1034
1035 pseudo_long_call _finish_atomic_sections, p5;
1011 sp += 12; 1036 sp += 12;
1012 jump.s .Lresume_userspace; 1037 jump.s .Lresume_userspace;
1013ENDPROC(_schedule_and_signal_from_int) 1038ENDPROC(_schedule_and_signal_from_int)
@@ -1357,7 +1382,7 @@ ENTRY(_sys_call_table)
1357 .long _sys_newuname 1382 .long _sys_newuname
1358 .long _sys_ni_syscall /* old sys_modify_ldt */ 1383 .long _sys_ni_syscall /* old sys_modify_ldt */
1359 .long _sys_adjtimex 1384 .long _sys_adjtimex
1360 .long _sys_ni_syscall /* 125 */ /* sys_mprotect */ 1385 .long _sys_mprotect /* 125 */
1361 .long _sys_ni_syscall /* old sys_sigprocmask */ 1386 .long _sys_ni_syscall /* old sys_sigprocmask */
1362 .long _sys_ni_syscall /* old "creat_module" */ 1387 .long _sys_ni_syscall /* old "creat_module" */
1363 .long _sys_init_module 1388 .long _sys_init_module
@@ -1376,16 +1401,16 @@ ENTRY(_sys_call_table)
1376 .long _sys_getdents 1401 .long _sys_getdents
1377 .long _sys_ni_syscall /* sys_select */ 1402 .long _sys_ni_syscall /* sys_select */
1378 .long _sys_flock 1403 .long _sys_flock
1379 .long _sys_ni_syscall /* sys_msync */ 1404 .long _sys_msync
1380 .long _sys_readv /* 145 */ 1405 .long _sys_readv /* 145 */
1381 .long _sys_writev 1406 .long _sys_writev
1382 .long _sys_getsid 1407 .long _sys_getsid
1383 .long _sys_fdatasync 1408 .long _sys_fdatasync
1384 .long _sys_sysctl 1409 .long _sys_sysctl
1385 .long _sys_ni_syscall /* 150 */ /* sys_mlock */ 1410 .long _sys_mlock /* 150 */
1386 .long _sys_ni_syscall /* sys_munlock */ 1411 .long _sys_munlock
1387 .long _sys_ni_syscall /* sys_mlockall */ 1412 .long _sys_mlockall
1388 .long _sys_ni_syscall /* sys_munlockall */ 1413 .long _sys_munlockall
1389 .long _sys_sched_setparam 1414 .long _sys_sched_setparam
1390 .long _sys_sched_getparam /* 155 */ 1415 .long _sys_sched_getparam /* 155 */
1391 .long _sys_sched_setscheduler 1416 .long _sys_sched_setscheduler
@@ -1450,8 +1475,8 @@ ENTRY(_sys_call_table)
1450 .long _sys_setfsuid /* 215 */ 1475 .long _sys_setfsuid /* 215 */
1451 .long _sys_setfsgid 1476 .long _sys_setfsgid
1452 .long _sys_pivot_root 1477 .long _sys_pivot_root
1453 .long _sys_ni_syscall /* sys_mincore */ 1478 .long _sys_mincore
1454 .long _sys_ni_syscall /* sys_madvise */ 1479 .long _sys_madvise
1455 .long _sys_getdents64 /* 220 */ 1480 .long _sys_getdents64 /* 220 */
1456 .long _sys_fcntl64 1481 .long _sys_fcntl64
1457 .long _sys_ni_syscall /* reserved for TUX */ 1482 .long _sys_ni_syscall /* reserved for TUX */
@@ -1507,7 +1532,7 @@ ENTRY(_sys_call_table)
1507 .long _sys_utimes 1532 .long _sys_utimes
1508 .long _sys_fadvise64_64 1533 .long _sys_fadvise64_64
1509 .long _sys_ni_syscall /* vserver */ 1534 .long _sys_ni_syscall /* vserver */
1510 .long _sys_ni_syscall /* 275, mbind */ 1535 .long _sys_mbind /* 275 */
1511 .long _sys_ni_syscall /* get_mempolicy */ 1536 .long _sys_ni_syscall /* get_mempolicy */
1512 .long _sys_ni_syscall /* set_mempolicy */ 1537 .long _sys_ni_syscall /* set_mempolicy */
1513 .long _sys_mq_open 1538 .long _sys_mq_open
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index cab0a0031eee..4391621d9048 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -144,8 +144,8 @@ ENTRY(__start)
144#endif 144#endif
145 145
146 /* Initialize stack pointer */ 146 /* Initialize stack pointer */
147 sp.l = _init_thread_union; 147 sp.l = _init_thread_union + THREAD_SIZE;
148 sp.h = _init_thread_union; 148 sp.h = _init_thread_union + THREAD_SIZE;
149 fp = sp; 149 fp = sp;
150 usp = sp; 150 usp = sp;
151 151
@@ -186,6 +186,11 @@ ENTRY(__start)
186 186
187 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ 187 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
188 call _bfin_relocate_l1_mem; 188 call _bfin_relocate_l1_mem;
189
190#ifdef CONFIG_ROMKERNEL
191 call _bfin_relocate_xip_data;
192#endif
193
189#ifdef CONFIG_BFIN_KERNEL_CLOCK 194#ifdef CONFIG_BFIN_KERNEL_CLOCK
190 /* Only use on-chip scratch space for stack when absolutely required 195 /* Only use on-chip scratch space for stack when absolutely required
191 * to avoid Anomaly 05000227 ... we know the init_clocks() func only 196 * to avoid Anomaly 05000227 ... we know the init_clocks() func only
@@ -257,12 +262,7 @@ ENTRY(_real_start)
257 R0 = R7; 262 R0 = R7;
258 call _cmdline_init; 263 call _cmdline_init;
259 264
260 /* Load the current thread pointer and stack */ 265 sp += -12 + 4; /* +4 is for reti loading above */
261 p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */
262 sp = sp + p1;
263 usp = sp;
264 fp = sp;
265 sp += -12;
266 call _init_pda 266 call _init_pda
267 sp += 12; 267 sp += 12;
268 jump.l _start_kernel; 268 jump.l _start_kernel;
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 8085ff1cce00..cee62cf4acd4 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -88,6 +88,13 @@ __common_int_entry:
88#else 88#else
89 cli r1; 89 cli r1;
90#endif 90#endif
91#ifdef CONFIG_TRACE_IRQFLAGS
92 [--sp] = r0;
93 sp += -12;
94 call _trace_hardirqs_off;
95 sp += 12;
96 r0 = [sp++];
97#endif
91 [--sp] = RETI; /* orig_pc */ 98 [--sp] = RETI; /* orig_pc */
92 /* Clear all L registers. */ 99 /* Clear all L registers. */
93 r1 = 0 (x); 100 r1 = 0 (x);
@@ -109,10 +116,10 @@ __common_int_entry:
109 cc = r0 == 0; 116 cc = r0 == 0;
110 if cc jump .Lcommon_restore_context; 117 if cc jump .Lcommon_restore_context;
111#else /* CONFIG_IPIPE */ 118#else /* CONFIG_IPIPE */
112 call _do_irq; 119 pseudo_long_call _do_irq, p2;
113 SP += 12; 120 SP += 12;
114#endif /* CONFIG_IPIPE */ 121#endif /* CONFIG_IPIPE */
115 call _return_from_int; 122 pseudo_long_call _return_from_int, p2;
116.Lcommon_restore_context: 123.Lcommon_restore_context:
117 RESTORE_CONTEXT 124 RESTORE_CONTEXT
118 rti; 125 rti;
@@ -168,7 +175,7 @@ ENTRY(_evt_ivhw)
168 175
169 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ 176 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
170 SP += -12; 177 SP += -12;
171 call _trap_c; 178 pseudo_long_call _trap_c, p5;
172 SP += 12; 179 SP += 12;
173 180
174#ifdef EBIU_ERRMST 181#ifdef EBIU_ERRMST
@@ -179,7 +186,7 @@ ENTRY(_evt_ivhw)
179 w[p0] = r0.l; 186 w[p0] = r0.l;
180#endif 187#endif
181 188
182 call _ret_from_exception; 189 pseudo_long_call _ret_from_exception, p2;
183 190
184.Lcommon_restore_all_sys: 191.Lcommon_restore_all_sys:
185 RESTORE_ALL_SYS 192 RESTORE_ALL_SYS
@@ -187,12 +194,28 @@ ENTRY(_evt_ivhw)
187ENDPROC(_evt_ivhw) 194ENDPROC(_evt_ivhw)
188 195
189/* Interrupt routine for evt2 (NMI). 196/* Interrupt routine for evt2 (NMI).
190 * We don't actually use this, so just return.
191 * For inner circle type details, please see: 197 * For inner circle type details, please see:
192 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi 198 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
193 */ 199 */
194ENTRY(_evt_nmi) 200ENTRY(_evt_nmi)
201#ifndef CONFIG_NMI_WATCHDOG
195.weak _evt_nmi 202.weak _evt_nmi
203#else
204 /* Not take account of CPLBs, this handler will not return */
205 SAVE_ALL_SYS
206 r0 = sp;
207 r1 = retn;
208 [sp + PT_PC] = r1;
209 trace_buffer_save(p4,r5);
210
211 ANOMALY_283_315_WORKAROUND(p4, r5)
212
213 SP += -12;
214 call _do_nmi;
215 SP += 12;
2161:
217 jump 1b;
218#endif
196 rtn; 219 rtn;
197ENDPROC(_evt_nmi) 220ENDPROC(_evt_nmi)
198 221
@@ -223,7 +246,7 @@ ENTRY(_evt_system_call)
223#ifdef CONFIG_FRAME_POINTER 246#ifdef CONFIG_FRAME_POINTER
224 fp = 0; 247 fp = 0;
225#endif 248#endif
226 call _system_call; 249 pseudo_long_call _system_call, p2;
227 jump .Lcommon_restore_context; 250 jump .Lcommon_restore_context;
228ENDPROC(_evt_system_call) 251ENDPROC(_evt_system_call)
229 252
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1873b2c1fede..7ad8878bfa18 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -28,6 +28,7 @@
28#include <asm/dpmc.h> 28#include <asm/dpmc.h>
29#include <asm/bfin5xx_spi.h> 29#include <asm/bfin5xx_spi.h>
30#include <asm/bfin_sport.h> 30#include <asm/bfin_sport.h>
31#include <asm/bfin_can.h>
31 32
32#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) 33#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
33 34
@@ -172,7 +173,12 @@ static void bfin_internal_mask_irq(unsigned int irq)
172 local_irq_restore_hw(flags); 173 local_irq_restore_hw(flags);
173} 174}
174 175
176#ifdef CONFIG_SMP
177static void bfin_internal_unmask_irq_affinity(unsigned int irq,
178 const struct cpumask *affinity)
179#else
175static void bfin_internal_unmask_irq(unsigned int irq) 180static void bfin_internal_unmask_irq(unsigned int irq)
181#endif
176{ 182{
177 unsigned long flags; 183 unsigned long flags;
178 184
@@ -185,16 +191,38 @@ static void bfin_internal_unmask_irq(unsigned int irq)
185 local_irq_save_hw(flags); 191 local_irq_save_hw(flags);
186 mask_bank = SIC_SYSIRQ(irq) / 32; 192 mask_bank = SIC_SYSIRQ(irq) / 32;
187 mask_bit = SIC_SYSIRQ(irq) % 32; 193 mask_bit = SIC_SYSIRQ(irq) % 32;
188 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
189 (1 << mask_bit));
190#ifdef CONFIG_SMP 194#ifdef CONFIG_SMP
191 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) | 195 if (cpumask_test_cpu(0, affinity))
192 (1 << mask_bit)); 196#endif
197 bfin_write_SIC_IMASK(mask_bank,
198 bfin_read_SIC_IMASK(mask_bank) |
199 (1 << mask_bit));
200#ifdef CONFIG_SMP
201 if (cpumask_test_cpu(1, affinity))
202 bfin_write_SICB_IMASK(mask_bank,
203 bfin_read_SICB_IMASK(mask_bank) |
204 (1 << mask_bit));
193#endif 205#endif
194#endif 206#endif
195 local_irq_restore_hw(flags); 207 local_irq_restore_hw(flags);
196} 208}
197 209
210#ifdef CONFIG_SMP
211static void bfin_internal_unmask_irq(unsigned int irq)
212{
213 struct irq_desc *desc = irq_to_desc(irq);
214 bfin_internal_unmask_irq_affinity(irq, desc->affinity);
215}
216
217static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
218{
219 bfin_internal_mask_irq(irq);
220 bfin_internal_unmask_irq_affinity(irq, mask);
221
222 return 0;
223}
224#endif
225
198#ifdef CONFIG_PM 226#ifdef CONFIG_PM
199int bfin_internal_set_wake(unsigned int irq, unsigned int state) 227int bfin_internal_set_wake(unsigned int irq, unsigned int state)
200{ 228{
@@ -224,11 +252,6 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
224 wakeup |= USBWE; 252 wakeup |= USBWE;
225 break; 253 break;
226#endif 254#endif
227#ifdef IRQ_KEY
228 case IRQ_KEY:
229 wakeup |= KPADWE;
230 break;
231#endif
232#ifdef CONFIG_BF54x 255#ifdef CONFIG_BF54x
233 case IRQ_CNT: 256 case IRQ_CNT:
234 wakeup |= ROTWE; 257 wakeup |= ROTWE;
@@ -270,6 +293,9 @@ static struct irq_chip bfin_internal_irqchip = {
270 .mask_ack = bfin_internal_mask_irq, 293 .mask_ack = bfin_internal_mask_irq,
271 .disable = bfin_internal_mask_irq, 294 .disable = bfin_internal_mask_irq,
272 .enable = bfin_internal_unmask_irq, 295 .enable = bfin_internal_unmask_irq,
296#ifdef CONFIG_SMP
297 .set_affinity = bfin_internal_set_affinity,
298#endif
273#ifdef CONFIG_PM 299#ifdef CONFIG_PM
274 .set_wake = bfin_internal_set_wake, 300 .set_wake = bfin_internal_set_wake,
275#endif 301#endif
@@ -294,7 +320,6 @@ static int error_int_mask;
294static void bfin_generic_error_mask_irq(unsigned int irq) 320static void bfin_generic_error_mask_irq(unsigned int irq)
295{ 321{
296 error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); 322 error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
297
298 if (!error_int_mask) 323 if (!error_int_mask)
299 bfin_internal_mask_irq(IRQ_GENERIC_ERROR); 324 bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
300} 325}
@@ -385,6 +410,127 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
385} 410}
386#endif /* BF537_GENERIC_ERROR_INT_DEMUX */ 411#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
387 412
413#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
414static int mac_stat_int_mask;
415
416static void bfin_mac_status_ack_irq(unsigned int irq)
417{
418 switch (irq) {
419 case IRQ_MAC_MMCINT:
420 bfin_write_EMAC_MMC_TIRQS(
421 bfin_read_EMAC_MMC_TIRQE() &
422 bfin_read_EMAC_MMC_TIRQS());
423 bfin_write_EMAC_MMC_RIRQS(
424 bfin_read_EMAC_MMC_RIRQE() &
425 bfin_read_EMAC_MMC_RIRQS());
426 break;
427 case IRQ_MAC_RXFSINT:
428 bfin_write_EMAC_RX_STKY(
429 bfin_read_EMAC_RX_IRQE() &
430 bfin_read_EMAC_RX_STKY());
431 break;
432 case IRQ_MAC_TXFSINT:
433 bfin_write_EMAC_TX_STKY(
434 bfin_read_EMAC_TX_IRQE() &
435 bfin_read_EMAC_TX_STKY());
436 break;
437 case IRQ_MAC_WAKEDET:
438 bfin_write_EMAC_WKUP_CTL(
439 bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
440 break;
441 default:
442 /* These bits are W1C */
443 bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
444 break;
445 }
446}
447
448static void bfin_mac_status_mask_irq(unsigned int irq)
449{
450 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
451#ifdef BF537_GENERIC_ERROR_INT_DEMUX
452 switch (irq) {
453 case IRQ_MAC_PHYINT:
454 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
455 break;
456 default:
457 break;
458 }
459#else
460 if (!mac_stat_int_mask)
461 bfin_internal_mask_irq(IRQ_MAC_ERROR);
462#endif
463 bfin_mac_status_ack_irq(irq);
464}
465
466static void bfin_mac_status_unmask_irq(unsigned int irq)
467{
468#ifdef BF537_GENERIC_ERROR_INT_DEMUX
469 switch (irq) {
470 case IRQ_MAC_PHYINT:
471 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
472 break;
473 default:
474 break;
475 }
476#else
477 if (!mac_stat_int_mask)
478 bfin_internal_unmask_irq(IRQ_MAC_ERROR);
479#endif
480 mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
481}
482
483#ifdef CONFIG_PM
484int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
485{
486#ifdef BF537_GENERIC_ERROR_INT_DEMUX
487 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
488#else
489 return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
490#endif
491}
492#endif
493
494static struct irq_chip bfin_mac_status_irqchip = {
495 .name = "MACST",
496 .ack = bfin_ack_noop,
497 .mask_ack = bfin_mac_status_mask_irq,
498 .mask = bfin_mac_status_mask_irq,
499 .unmask = bfin_mac_status_unmask_irq,
500#ifdef CONFIG_PM
501 .set_wake = bfin_mac_status_set_wake,
502#endif
503};
504
505static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
506 struct irq_desc *inta_desc)
507{
508 int i, irq = 0;
509 u32 status = bfin_read_EMAC_SYSTAT();
510
511 for (i = 0; i < (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
512 if (status & (1L << i)) {
513 irq = IRQ_MAC_PHYINT + i;
514 break;
515 }
516
517 if (irq) {
518 if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
519 bfin_handle_irq(irq);
520 } else {
521 bfin_mac_status_ack_irq(irq);
522 pr_debug("IRQ %d:"
523 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
524 irq);
525 }
526 } else
527 printk(KERN_ERR
528 "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
529 " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
530 __func__, __FILE__, __LINE__);
531}
532#endif
533
388static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) 534static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
389{ 535{
390#ifdef CONFIG_IPIPE 536#ifdef CONFIG_IPIPE
@@ -1031,7 +1177,6 @@ int __init init_arch_irq(void)
1031#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 1177#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
1032 case IRQ_PORTF_INTA: 1178 case IRQ_PORTF_INTA:
1033#endif 1179#endif
1034
1035 set_irq_chained_handler(irq, 1180 set_irq_chained_handler(irq,
1036 bfin_demux_gpio_irq); 1181 bfin_demux_gpio_irq);
1037 break; 1182 break;
@@ -1040,29 +1185,36 @@ int __init init_arch_irq(void)
1040 set_irq_chained_handler(irq, bfin_demux_error_irq); 1185 set_irq_chained_handler(irq, bfin_demux_error_irq);
1041 break; 1186 break;
1042#endif 1187#endif
1043 1188#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1044#ifdef CONFIG_SMP 1189 case IRQ_MAC_ERROR:
1045#ifdef CONFIG_TICKSOURCE_GPTMR0 1190 set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
1046 case IRQ_TIMER0: 1191 break;
1047#endif
1048#ifdef CONFIG_TICKSOURCE_CORETMR
1049 case IRQ_CORETMR:
1050#endif 1192#endif
1193#ifdef CONFIG_SMP
1051 case IRQ_SUPPLE_0: 1194 case IRQ_SUPPLE_0:
1052 case IRQ_SUPPLE_1: 1195 case IRQ_SUPPLE_1:
1053 set_irq_handler(irq, handle_percpu_irq); 1196 set_irq_handler(irq, handle_percpu_irq);
1054 break; 1197 break;
1055#endif 1198#endif
1056 1199
1057#ifdef CONFIG_IPIPE 1200#ifdef CONFIG_TICKSOURCE_CORETMR
1058#ifndef CONFIG_TICKSOURCE_CORETMR 1201 case IRQ_CORETMR:
1059 case IRQ_TIMER0: 1202# ifdef CONFIG_SMP
1203 set_irq_handler(irq, handle_percpu_irq);
1204 break;
1205# else
1060 set_irq_handler(irq, handle_simple_irq); 1206 set_irq_handler(irq, handle_simple_irq);
1061 break; 1207 break;
1208# endif
1062#endif 1209#endif
1063 case IRQ_CORETMR: 1210
1211#ifdef CONFIG_TICKSOURCE_GPTMR0
1212 case IRQ_TIMER0:
1064 set_irq_handler(irq, handle_simple_irq); 1213 set_irq_handler(irq, handle_simple_irq);
1065 break; 1214 break;
1215#endif
1216
1217#ifdef CONFIG_IPIPE
1066 default: 1218 default:
1067 set_irq_handler(irq, handle_level_irq); 1219 set_irq_handler(irq, handle_level_irq);
1068 break; 1220 break;
@@ -1078,14 +1230,22 @@ int __init init_arch_irq(void)
1078 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) 1230 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
1079 set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip, 1231 set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
1080 handle_level_irq); 1232 handle_level_irq);
1233#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1234 set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
1235#endif
1081#endif 1236#endif
1082 1237
1238#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1239 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1240 set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
1241 handle_level_irq);
1242#endif
1083 /* if configured as edge, then will be changed to do_edge_IRQ */ 1243 /* if configured as edge, then will be changed to do_edge_IRQ */
1084 for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++) 1244 for (irq = GPIO_IRQ_BASE;
1245 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1085 set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, 1246 set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
1086 handle_level_irq); 1247 handle_level_irq);
1087 1248
1088
1089 bfin_write_IMASK(0); 1249 bfin_write_IMASK(0);
1090 CSYNC(); 1250 CSYNC();
1091 ilat = bfin_read_ILAT(); 1251 ilat = bfin_read_ILAT();
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 369e687582b7..7803f22d2ca7 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -122,9 +122,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
122 wait = msg->call_struct.wait; 122 wait = msg->call_struct.wait;
123 cpu_clear(cpu, msg->call_struct.pending); 123 cpu_clear(cpu, msg->call_struct.pending);
124 func(info); 124 func(info);
125 if (wait) 125 if (wait) {
126#ifdef __ARCH_SYNC_CORE_DCACHE
127 /*
128 * 'wait' usually means synchronization between CPUs.
129 * Invalidate D cache in case shared data was changed
130 * by func() to ensure cache coherence.
131 */
132 resync_core_dcache();
133#endif
126 cpu_clear(cpu, msg->call_struct.waitmask); 134 cpu_clear(cpu, msg->call_struct.waitmask);
127 else 135 } else
128 kfree(msg); 136 kfree(msg);
129} 137}
130 138
@@ -219,6 +227,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
219 blackfin_dcache_invalidate_range( 227 blackfin_dcache_invalidate_range(
220 (unsigned long)(&msg->call_struct.waitmask), 228 (unsigned long)(&msg->call_struct.waitmask),
221 (unsigned long)(&msg->call_struct.waitmask)); 229 (unsigned long)(&msg->call_struct.waitmask));
230#ifdef __ARCH_SYNC_CORE_DCACHE
231 /*
232 * Invalidate D cache in case shared data was changed by
233 * other processors to ensure cache coherence.
234 */
235 resync_core_dcache();
236#endif
222 kfree(msg); 237 kfree(msg);
223 } 238 }
224 return 0; 239 return 0;
@@ -261,6 +276,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
261 blackfin_dcache_invalidate_range( 276 blackfin_dcache_invalidate_range(
262 (unsigned long)(&msg->call_struct.waitmask), 277 (unsigned long)(&msg->call_struct.waitmask),
263 (unsigned long)(&msg->call_struct.waitmask)); 278 (unsigned long)(&msg->call_struct.waitmask));
279#ifdef __ARCH_SYNC_CORE_DCACHE
280 /*
281 * Invalidate D cache in case shared data was changed by
282 * other processors to ensure cache coherence.
283 */
284 resync_core_dcache();
285#endif
264 kfree(msg); 286 kfree(msg);
265 } 287 }
266 return 0; 288 return 0;
@@ -322,8 +344,11 @@ void smp_send_stop(void)
322 344
323int __cpuinit __cpu_up(unsigned int cpu) 345int __cpuinit __cpu_up(unsigned int cpu)
324{ 346{
325 struct task_struct *idle;
326 int ret; 347 int ret;
348 static struct task_struct *idle;
349
350 if (idle)
351 free_task(idle);
327 352
328 idle = fork_idle(cpu); 353 idle = fork_idle(cpu);
329 if (IS_ERR(idle)) { 354 if (IS_ERR(idle)) {
@@ -332,7 +357,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
332 } 357 }
333 358
334 secondary_stack = task_stack_page(idle) + THREAD_SIZE; 359 secondary_stack = task_stack_page(idle) + THREAD_SIZE;
335 smp_wmb();
336 360
337 ret = platform_boot_secondary(cpu, idle); 361 ret = platform_boot_secondary(cpu, idle);
338 362
@@ -343,9 +367,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
343 367
344static void __cpuinit setup_secondary(unsigned int cpu) 368static void __cpuinit setup_secondary(unsigned int cpu)
345{ 369{
346#if !defined(CONFIG_TICKSOURCE_GPTMR0)
347 struct irq_desc *timer_desc;
348#endif
349 unsigned long ilat; 370 unsigned long ilat;
350 371
351 bfin_write_IMASK(0); 372 bfin_write_IMASK(0);
@@ -360,17 +381,6 @@ static void __cpuinit setup_secondary(unsigned int cpu)
360 bfin_irq_flags |= IMASK_IVG15 | 381 bfin_irq_flags |= IMASK_IVG15 |
361 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 382 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
362 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 383 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
363
364#if defined(CONFIG_TICKSOURCE_GPTMR0)
365 /* Power down the core timer, just to play safe. */
366 bfin_write_TCNTL(0);
367
368 /* system timer0 has been setup by CoreA. */
369#else
370 timer_desc = irq_desc + IRQ_CORETMR;
371 setup_core_timer();
372 timer_desc->chip->enable(IRQ_CORETMR);
373#endif
374} 384}
375 385
376void __cpuinit secondary_start_kernel(void) 386void __cpuinit secondary_start_kernel(void)
@@ -405,7 +415,6 @@ void __cpuinit secondary_start_kernel(void)
405 atomic_inc(&mm->mm_users); 415 atomic_inc(&mm->mm_users);
406 atomic_inc(&mm->mm_count); 416 atomic_inc(&mm->mm_count);
407 current->active_mm = mm; 417 current->active_mm = mm;
408 BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */
409 418
410 preempt_disable(); 419 preempt_disable();
411 420
@@ -413,6 +422,9 @@ void __cpuinit secondary_start_kernel(void)
413 422
414 platform_secondary_init(cpu); 423 platform_secondary_init(cpu);
415 424
425 /* setup local core timer */
426 bfin_local_timer_setup();
427
416 local_irq_enable(); 428 local_irq_enable();
417 429
418 /* 430 /*
@@ -462,25 +474,58 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
462EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); 474EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
463 475
464#ifdef __ARCH_SYNC_CORE_ICACHE 476#ifdef __ARCH_SYNC_CORE_ICACHE
477unsigned long icache_invld_count[NR_CPUS];
465void resync_core_icache(void) 478void resync_core_icache(void)
466{ 479{
467 unsigned int cpu = get_cpu(); 480 unsigned int cpu = get_cpu();
468 blackfin_invalidate_entire_icache(); 481 blackfin_invalidate_entire_icache();
469 ++per_cpu(cpu_data, cpu).icache_invld_count; 482 icache_invld_count[cpu]++;
470 put_cpu(); 483 put_cpu();
471} 484}
472EXPORT_SYMBOL(resync_core_icache); 485EXPORT_SYMBOL(resync_core_icache);
473#endif 486#endif
474 487
475#ifdef __ARCH_SYNC_CORE_DCACHE 488#ifdef __ARCH_SYNC_CORE_DCACHE
489unsigned long dcache_invld_count[NR_CPUS];
476unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); 490unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
477 491
478void resync_core_dcache(void) 492void resync_core_dcache(void)
479{ 493{
480 unsigned int cpu = get_cpu(); 494 unsigned int cpu = get_cpu();
481 blackfin_invalidate_entire_dcache(); 495 blackfin_invalidate_entire_dcache();
482 ++per_cpu(cpu_data, cpu).dcache_invld_count; 496 dcache_invld_count[cpu]++;
483 put_cpu(); 497 put_cpu();
484} 498}
485EXPORT_SYMBOL(resync_core_dcache); 499EXPORT_SYMBOL(resync_core_dcache);
486#endif 500#endif
501
502#ifdef CONFIG_HOTPLUG_CPU
503int __cpuexit __cpu_disable(void)
504{
505 unsigned int cpu = smp_processor_id();
506
507 if (cpu == 0)
508 return -EPERM;
509
510 set_cpu_online(cpu, false);
511 return 0;
512}
513
514static DECLARE_COMPLETION(cpu_killed);
515
516int __cpuexit __cpu_die(unsigned int cpu)
517{
518 return wait_for_completion_timeout(&cpu_killed, 5000);
519}
520
521void cpu_die(void)
522{
523 complete(&cpu_killed);
524
525 atomic_dec(&init_mm.mm_users);
526 atomic_dec(&init_mm.mm_count);
527
528 local_irq_disable();
529 platform_cpu_die();
530}
531#endif
diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c
index 84cdc5a1c139..9213e2357888 100644
--- a/arch/blackfin/mm/isram-driver.c
+++ b/arch/blackfin/mm/isram-driver.c
@@ -62,7 +62,7 @@ static void isram_write(const void *addr, uint64_t data)
62 uint32_t cmd; 62 uint32_t cmd;
63 unsigned long flags; 63 unsigned long flags;
64 64
65 if (addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH)) 65 if (unlikely(addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH)))
66 return; 66 return;
67 67
68 cmd = IADDR2DTEST(addr) | 2; /* write */ 68 cmd = IADDR2DTEST(addr) | 2; /* write */
@@ -93,7 +93,7 @@ static uint64_t isram_read(const void *addr)
93 unsigned long flags; 93 unsigned long flags;
94 uint64_t ret; 94 uint64_t ret;
95 95
96 if (addr > (void *)(L1_CODE_START + L1_CODE_LENGTH)) 96 if (unlikely(addr > (void *)(L1_CODE_START + L1_CODE_LENGTH)))
97 return 0; 97 return 0;
98 98
99 cmd = IADDR2DTEST(addr) | 0; /* read */ 99 cmd = IADDR2DTEST(addr) | 0; /* read */
@@ -120,7 +120,7 @@ static bool isram_check_addr(const void *addr, size_t n)
120{ 120{
121 if ((addr >= (void *)L1_CODE_START) && 121 if ((addr >= (void *)L1_CODE_START) &&
122 (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) { 122 (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) {
123 if ((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH)) { 123 if (unlikely((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH))) {
124 show_stack(NULL, NULL); 124 show_stack(NULL, NULL);
125 pr_err("copy involving %p length (%zu) too long\n", addr, n); 125 pr_err("copy involving %p length (%zu) too long\n", addr, n);
126 } 126 }
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index f068c11ea98f..5732da25ee2d 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -402,7 +402,7 @@ void *l1_data_A_sram_alloc(size_t size)
402 void *addr; 402 void *addr;
403 unsigned int cpu; 403 unsigned int cpu;
404 404
405 cpu = get_cpu(); 405 cpu = smp_processor_id();
406 /* add mutex operation */ 406 /* add mutex operation */
407 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 407 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
408 408
@@ -411,7 +411,6 @@ void *l1_data_A_sram_alloc(size_t size)
411 411
412 /* add mutex operation */ 412 /* add mutex operation */
413 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 413 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
414 put_cpu();
415 414
416 pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n", 415 pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
417 (long unsigned int)addr, size); 416 (long unsigned int)addr, size);
@@ -430,7 +429,7 @@ int l1_data_A_sram_free(const void *addr)
430 int ret; 429 int ret;
431 unsigned int cpu; 430 unsigned int cpu;
432 431
433 cpu = get_cpu(); 432 cpu = smp_processor_id();
434 /* add mutex operation */ 433 /* add mutex operation */
435 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 434 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
436 435
@@ -439,7 +438,6 @@ int l1_data_A_sram_free(const void *addr)
439 438
440 /* add mutex operation */ 439 /* add mutex operation */
441 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 440 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
442 put_cpu();
443 441
444 return ret; 442 return ret;
445#else 443#else
@@ -455,7 +453,7 @@ void *l1_data_B_sram_alloc(size_t size)
455 void *addr; 453 void *addr;
456 unsigned int cpu; 454 unsigned int cpu;
457 455
458 cpu = get_cpu(); 456 cpu = smp_processor_id();
459 /* add mutex operation */ 457 /* add mutex operation */
460 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 458 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
461 459
@@ -464,7 +462,6 @@ void *l1_data_B_sram_alloc(size_t size)
464 462
465 /* add mutex operation */ 463 /* add mutex operation */
466 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 464 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
467 put_cpu();
468 465
469 pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n", 466 pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
470 (long unsigned int)addr, size); 467 (long unsigned int)addr, size);
@@ -483,7 +480,7 @@ int l1_data_B_sram_free(const void *addr)
483 int ret; 480 int ret;
484 unsigned int cpu; 481 unsigned int cpu;
485 482
486 cpu = get_cpu(); 483 cpu = smp_processor_id();
487 /* add mutex operation */ 484 /* add mutex operation */
488 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 485 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
489 486
@@ -492,7 +489,6 @@ int l1_data_B_sram_free(const void *addr)
492 489
493 /* add mutex operation */ 490 /* add mutex operation */
494 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 491 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
495 put_cpu();
496 492
497 return ret; 493 return ret;
498#else 494#else
@@ -540,7 +536,7 @@ void *l1_inst_sram_alloc(size_t size)
540 void *addr; 536 void *addr;
541 unsigned int cpu; 537 unsigned int cpu;
542 538
543 cpu = get_cpu(); 539 cpu = smp_processor_id();
544 /* add mutex operation */ 540 /* add mutex operation */
545 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); 541 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
546 542
@@ -549,7 +545,6 @@ void *l1_inst_sram_alloc(size_t size)
549 545
550 /* add mutex operation */ 546 /* add mutex operation */
551 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); 547 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
552 put_cpu();
553 548
554 pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n", 549 pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
555 (long unsigned int)addr, size); 550 (long unsigned int)addr, size);
@@ -568,7 +563,7 @@ int l1_inst_sram_free(const void *addr)
568 int ret; 563 int ret;
569 unsigned int cpu; 564 unsigned int cpu;
570 565
571 cpu = get_cpu(); 566 cpu = smp_processor_id();
572 /* add mutex operation */ 567 /* add mutex operation */
573 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); 568 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
574 569
@@ -577,7 +572,6 @@ int l1_inst_sram_free(const void *addr)
577 572
578 /* add mutex operation */ 573 /* add mutex operation */
579 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); 574 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
580 put_cpu();
581 575
582 return ret; 576 return ret;
583#else 577#else
@@ -593,7 +587,7 @@ void *l1sram_alloc(size_t size)
593 void *addr; 587 void *addr;
594 unsigned int cpu; 588 unsigned int cpu;
595 589
596 cpu = get_cpu(); 590 cpu = smp_processor_id();
597 /* add mutex operation */ 591 /* add mutex operation */
598 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 592 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
599 593
@@ -602,7 +596,6 @@ void *l1sram_alloc(size_t size)
602 596
603 /* add mutex operation */ 597 /* add mutex operation */
604 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 598 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
605 put_cpu();
606 599
607 return addr; 600 return addr;
608} 601}
@@ -614,7 +607,7 @@ void *l1sram_alloc_max(size_t *psize)
614 void *addr; 607 void *addr;
615 unsigned int cpu; 608 unsigned int cpu;
616 609
617 cpu = get_cpu(); 610 cpu = smp_processor_id();
618 /* add mutex operation */ 611 /* add mutex operation */
619 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 612 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
620 613
@@ -623,7 +616,6 @@ void *l1sram_alloc_max(size_t *psize)
623 616
624 /* add mutex operation */ 617 /* add mutex operation */
625 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 618 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
626 put_cpu();
627 619
628 return addr; 620 return addr;
629} 621}
@@ -635,7 +627,7 @@ int l1sram_free(const void *addr)
635 int ret; 627 int ret;
636 unsigned int cpu; 628 unsigned int cpu;
637 629
638 cpu = get_cpu(); 630 cpu = smp_processor_id();
639 /* add mutex operation */ 631 /* add mutex operation */
640 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 632 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
641 633
@@ -644,7 +636,6 @@ int l1sram_free(const void *addr)
644 636
645 /* add mutex operation */ 637 /* add mutex operation */
646 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 638 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
647 put_cpu();
648 639
649 return ret; 640 return ret;
650} 641}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index b008168ae946..203ec61c6d4c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -14,6 +14,8 @@ config MICROBLAZE
14 select USB_ARCH_HAS_EHCI 14 select USB_ARCH_HAS_EHCI
15 select ARCH_WANT_OPTIONAL_GPIOLIB 15 select ARCH_WANT_OPTIONAL_GPIOLIB
16 select HAVE_OPROFILE 16 select HAVE_OPROFILE
17 select HAVE_DMA_ATTRS
18 select HAVE_DMA_API_DEBUG
17 select TRACING_SUPPORT 19 select TRACING_SUPPORT
18 20
19config SWAP 21config SWAP
@@ -76,9 +78,6 @@ config HAVE_LATENCYTOP_SUPPORT
76config PCI 78config PCI
77 def_bool n 79 def_bool n
78 80
79config NO_DMA
80 def_bool y
81
82config DTC 81config DTC
83 def_bool y 82 def_bool y
84 83
@@ -146,7 +145,6 @@ menu "Advanced setup"
146 145
147config ADVANCED_OPTIONS 146config ADVANCED_OPTIONS
148 bool "Prompt for advanced kernel configuration options" 147 bool "Prompt for advanced kernel configuration options"
149 depends on MMU
150 help 148 help
151 This option will enable prompting for a variety of advanced kernel 149 This option will enable prompting for a variety of advanced kernel
152 configuration options. These options can cause the kernel to not 150 configuration options. These options can cause the kernel to not
@@ -158,6 +156,15 @@ config ADVANCED_OPTIONS
158comment "Default settings for advanced configuration options are used" 156comment "Default settings for advanced configuration options are used"
159 depends on !ADVANCED_OPTIONS 157 depends on !ADVANCED_OPTIONS
160 158
159config XILINX_UNCACHED_SHADOW
160 bool "Are you using uncached shadow for RAM ?"
161 depends on ADVANCED_OPTIONS && !MMU
162 default n
163 help
164 This is needed to be able to allocate uncachable memory regions.
165 The feature requires the design to define the RAM memory controller
166 window to be twice as large as the actual physical memory.
167
161config HIGHMEM_START_BOOL 168config HIGHMEM_START_BOOL
162 bool "Set high memory pool address" 169 bool "Set high memory pool address"
163 depends on ADVANCED_OPTIONS && HIGHMEM 170 depends on ADVANCED_OPTIONS && HIGHMEM
@@ -175,7 +182,7 @@ config HIGHMEM_START
175 182
176config LOWMEM_SIZE_BOOL 183config LOWMEM_SIZE_BOOL
177 bool "Set maximum low memory" 184 bool "Set maximum low memory"
178 depends on ADVANCED_OPTIONS 185 depends on ADVANCED_OPTIONS && MMU
179 help 186 help
180 This option allows you to set the maximum amount of memory which 187 This option allows you to set the maximum amount of memory which
181 will be used as "low memory", that is, memory which the kernel can 188 will be used as "low memory", that is, memory which the kernel can
@@ -187,7 +194,6 @@ config LOWMEM_SIZE_BOOL
187 194
188config LOWMEM_SIZE 195config LOWMEM_SIZE
189 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL 196 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
190 depends on MMU
191 default "0x30000000" 197 default "0x30000000"
192 198
193config KERNEL_START_BOOL 199config KERNEL_START_BOOL
@@ -208,7 +214,7 @@ config KERNEL_START
208 214
209config TASK_SIZE_BOOL 215config TASK_SIZE_BOOL
210 bool "Set custom user task size" 216 bool "Set custom user task size"
211 depends on ADVANCED_OPTIONS 217 depends on ADVANCED_OPTIONS && MMU
212 help 218 help
213 This option allows you to set the amount of virtual address space 219 This option allows you to set the amount of virtual address space
214 allocated to user tasks. This can be useful in optimizing the 220 allocated to user tasks. This can be useful in optimizing the
@@ -218,42 +224,34 @@ config TASK_SIZE_BOOL
218 224
219config TASK_SIZE 225config TASK_SIZE
220 hex "Size of user task space" if TASK_SIZE_BOOL 226 hex "Size of user task space" if TASK_SIZE_BOOL
221 depends on MMU
222 default "0x80000000" 227 default "0x80000000"
223 228
224config CONSISTENT_START_BOOL 229endmenu
225 bool "Set custom consistent memory pool address"
226 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
227 help
228 This option allows you to set the base virtual address
229 of the the consistent memory pool. This pool of virtual
230 memory is used to make consistent memory allocations.
231 230
232config CONSISTENT_START 231source "mm/Kconfig"
233 hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
234 depends on MMU
235 default "0xff100000" if NOT_COHERENT_CACHE
236 232
237config CONSISTENT_SIZE_BOOL 233menu "Exectuable file formats"
238 bool "Set custom consistent memory pool size"
239 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
240 help
241 This option allows you to set the size of the the
242 consistent memory pool. This pool of virtual memory
243 is used to make consistent memory allocations.
244 234
245config CONSISTENT_SIZE 235source "fs/Kconfig.binfmt"
246 hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
247 depends on MMU
248 default "0x00200000" if NOT_COHERENT_CACHE
249 236
250endmenu 237endmenu
251 238
252source "mm/Kconfig" 239menu "Bus Options"
253 240
254menu "Exectuable file formats" 241config PCI
242 bool "PCI support"
255 243
256source "fs/Kconfig.binfmt" 244config PCI_DOMAINS
245 def_bool PCI
246
247config PCI_SYSCALL
248 def_bool PCI
249
250config PCI_XILINX
251 bool "Xilinx PCI host bridge support"
252 depends on PCI
253
254source "drivers/pci/Kconfig"
257 255
258endmenu 256endmenu
259 257
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d2d6cfcb1a30..836832dd9b26 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -50,6 +50,7 @@ libs-y += $(LIBGCC)
50core-y += arch/microblaze/kernel/ 50core-y += arch/microblaze/kernel/
51core-y += arch/microblaze/mm/ 51core-y += arch/microblaze/mm/
52core-y += arch/microblaze/platform/ 52core-y += arch/microblaze/platform/
53core-$(CONFIG_PCI) += arch/microblaze/pci/
53 54
54drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ 55drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
55 56
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 78a038452c0f..402b46e630f6 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -14,6 +14,10 @@ struct device_node;
14struct dev_archdata { 14struct dev_archdata {
15 /* Optional pointer to an OF device node */ 15 /* Optional pointer to an OF device node */
16 struct device_node *of_node; 16 struct device_node *of_node;
17
18 /* DMA operations on that device */
19 struct dma_map_ops *dma_ops;
20 void *dma_data;
17}; 21};
18 22
19struct pdev_archdata { 23struct pdev_archdata {
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index d00e40099165..18b3731c8509 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1 +1,153 @@
1#include <asm-generic/dma-mapping-broken.h> 1/*
2 * Implements the generic device dma API for microblaze and the pci
3 *
4 * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2009-2010 PetaLogix
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * This file is base on powerpc and x86 dma-mapping.h versions
12 * Copyright (C) 2004 IBM
13 */
14
15#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
16#define _ASM_MICROBLAZE_DMA_MAPPING_H
17
18/*
19 * See Documentation/PCI/PCI-DMA-mapping.txt and
20 * Documentation/DMA-API.txt for documentation.
21 */
22
23#include <linux/types.h>
24#include <linux/cache.h>
25#include <linux/mm.h>
26#include <linux/scatterlist.h>
27#include <linux/dma-debug.h>
28#include <linux/dma-attrs.h>
29#include <asm/io.h>
30#include <asm-generic/dma-coherent.h>
31
32#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
33
34#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
35#define __dma_free_coherent(size, addr) ((void)0)
36#define __dma_sync(addr, size, rw) ((void)0)
37
38static inline unsigned long device_to_mask(struct device *dev)
39{
40 if (dev->dma_mask && *dev->dma_mask)
41 return *dev->dma_mask;
42 /* Assume devices without mask can take 32 bit addresses */
43 return 0xfffffffful;
44}
45
46extern struct dma_map_ops *dma_ops;
47
48/*
49 * Available generic sets of operations
50 */
51extern struct dma_map_ops dma_direct_ops;
52
53static inline struct dma_map_ops *get_dma_ops(struct device *dev)
54{
55 /* We don't handle the NULL dev case for ISA for now. We could
56 * do it via an out of line call but it is not needed for now. The
57 * only ISA DMA device we support is the floppy and we have a hack
58 * in the floppy driver directly to get a device for us.
59 */
60 if (unlikely(!dev) || !dev->archdata.dma_ops)
61 return NULL;
62
63 return dev->archdata.dma_ops;
64}
65
66static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
67{
68 dev->archdata.dma_ops = ops;
69}
70
71static inline int dma_supported(struct device *dev, u64 mask)
72{
73 struct dma_map_ops *ops = get_dma_ops(dev);
74
75 if (unlikely(!ops))
76 return 0;
77 if (!ops->dma_supported)
78 return 1;
79 return ops->dma_supported(dev, mask);
80}
81
82#ifdef CONFIG_PCI
83/* We have our own implementation of pci_set_dma_mask() */
84#define HAVE_ARCH_PCI_SET_DMA_MASK
85
86#endif
87
88static inline int dma_set_mask(struct device *dev, u64 dma_mask)
89{
90 struct dma_map_ops *ops = get_dma_ops(dev);
91
92 if (unlikely(ops == NULL))
93 return -EIO;
94 if (ops->set_dma_mask)
95 return ops->set_dma_mask(dev, dma_mask);
96 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
97 return -EIO;
98 *dev->dma_mask = dma_mask;
99 return 0;
100}
101
102#include <asm-generic/dma-mapping-common.h>
103
104static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
105{
106 struct dma_map_ops *ops = get_dma_ops(dev);
107 if (ops->mapping_error)
108 return ops->mapping_error(dev, dma_addr);
109
110 return (dma_addr == DMA_ERROR_CODE);
111}
112
113#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
114#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
115#define dma_is_consistent(d, h) (1)
116
117static inline void *dma_alloc_coherent(struct device *dev, size_t size,
118 dma_addr_t *dma_handle, gfp_t flag)
119{
120 struct dma_map_ops *ops = get_dma_ops(dev);
121 void *memory;
122
123 BUG_ON(!ops);
124
125 memory = ops->alloc_coherent(dev, size, dma_handle, flag);
126
127 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
128 return memory;
129}
130
131static inline void dma_free_coherent(struct device *dev, size_t size,
132 void *cpu_addr, dma_addr_t dma_handle)
133{
134 struct dma_map_ops *ops = get_dma_ops(dev);
135
136 BUG_ON(!ops);
137 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
138 ops->free_coherent(dev, size, cpu_addr, dma_handle);
139}
140
141static inline int dma_get_cache_alignment(void)
142{
143 return L1_CACHE_BYTES;
144}
145
146static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
147 enum dma_data_direction direction)
148{
149 BUG_ON(direction == DMA_NONE);
150 __dma_sync(vaddr, size, (int)direction);
151}
152
153#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 267c7c779e53..32d621a56aee 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -15,7 +15,23 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/mm.h> /* Get struct page {...} */ 17#include <linux/mm.h> /* Get struct page {...} */
18#include <asm-generic/iomap.h>
18 19
20#ifndef CONFIG_PCI
21#define _IO_BASE 0
22#define _ISA_MEM_BASE 0
23#define PCI_DRAM_OFFSET 0
24#else
25#define _IO_BASE isa_io_base
26#define _ISA_MEM_BASE isa_mem_base
27#define PCI_DRAM_OFFSET pci_dram_offset
28#endif
29
30extern unsigned long isa_io_base;
31extern unsigned long pci_io_base;
32extern unsigned long pci_dram_offset;
33
34extern resource_size_t isa_mem_base;
19 35
20#define IO_SPACE_LIMIT (0xFFFFFFFF) 36#define IO_SPACE_LIMIT (0xFFFFFFFF)
21 37
@@ -124,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
124#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) 140#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
125#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) 141#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
126 142
127#define __page_address(page) \
128 (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
129#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
130#define page_to_bus(page) (page_to_phys(page)) 143#define page_to_bus(page) (page_to_phys(page))
131#define bus_to_virt(addr) (phys_to_virt(addr)) 144#define bus_to_virt(addr) (phys_to_virt(addr))
132 145
@@ -227,15 +240,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
227#define out_8(a, v) __raw_writeb((v), (a)) 240#define out_8(a, v) __raw_writeb((v), (a))
228#define in_8(a) __raw_readb(a) 241#define in_8(a) __raw_readb(a)
229 242
230/* FIXME */ 243#define ioport_map(port, nr) ((void __iomem *)(port))
231static inline void __iomem *ioport_map(unsigned long port, unsigned int len) 244#define ioport_unmap(addr)
232{
233 return (void __iomem *) (port);
234}
235
236static inline void ioport_unmap(void __iomem *addr)
237{
238 /* Nothing to do */
239}
240 245
241#endif /* _ASM_MICROBLAZE_IO_H */ 246#endif /* _ASM_MICROBLAZE_IO_H */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index 90f050535ebe..31a35c33df63 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -14,6 +14,12 @@
14 14
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16 16
17/* This type is the placeholder for a hardware interrupt number. It has to
18 * be big enough to enclose whatever representation is used by a given
19 * platform.
20 */
21typedef unsigned long irq_hw_number_t;
22
17extern unsigned int nr_irq; 23extern unsigned int nr_irq;
18 24
19#define NO_IRQ (-1) 25#define NO_IRQ (-1)
@@ -21,7 +27,8 @@ extern unsigned int nr_irq;
21struct pt_regs; 27struct pt_regs;
22extern void do_IRQ(struct pt_regs *regs); 28extern void do_IRQ(struct pt_regs *regs);
23 29
24/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space 30/**
31 * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
25 * @device: Device node of the device whose interrupt is to be mapped 32 * @device: Device node of the device whose interrupt is to be mapped
26 * @index: Index of the interrupt to map 33 * @index: Index of the interrupt to map
27 * 34 *
@@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq)
40 return; 47 return;
41} 48}
42 49
50struct irq_host;
51
52/**
53 * irq_create_mapping - Map a hardware interrupt into linux virq space
54 * @host: host owning this hardware interrupt or NULL for default host
55 * @hwirq: hardware irq number in that host space
56 *
57 * Only one mapping per hardware interrupt is permitted. Returns a linux
58 * virq number.
59 * If the sense/trigger is to be specified, set_irq_type() should be called
60 * on the number returned from that call.
61 */
62extern unsigned int irq_create_mapping(struct irq_host *host,
63 irq_hw_number_t hwirq);
64
65/**
66 * irq_create_of_mapping - Map a hardware interrupt into linux virq space
67 * @controller: Device node of the interrupt controller
68 * @inspec: Interrupt specifier from the device-tree
69 * @intsize: Size of the interrupt specifier from the device-tree
70 *
71 * This function is identical to irq_create_mapping except that it takes
72 * as input informations straight from the device-tree (typically the results
73 * of the of_irq_map_*() functions.
74 */
75extern unsigned int irq_create_of_mapping(struct device_node *controller,
76 u32 *intspec, unsigned int intsize);
77
43#endif /* _ASM_MICROBLAZE_IRQ_H */ 78#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 9b66c0fa9a32..2dd1d04129e0 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -62,12 +62,6 @@ extern unsigned int __page_offset;
62#define PAGE_OFFSET CONFIG_KERNEL_START 62#define PAGE_OFFSET CONFIG_KERNEL_START
63 63
64/* 64/*
65 * MAP_NR -- given an address, calculate the index of the page struct which
66 * points to the address's page.
67 */
68#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
69
70/*
71 * The basic type of a PTE - 32 bit physical addressing. 65 * The basic type of a PTE - 32 bit physical addressing.
72 */ 66 */
73typedef unsigned long pte_basic_t; 67typedef unsigned long pte_basic_t;
@@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
154# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 148# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
155 149
156# ifdef CONFIG_MMU 150# ifdef CONFIG_MMU
157# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) 151
152# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
153# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
154# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
155
158# else /* CONFIG_MMU */ 156# else /* CONFIG_MMU */
159# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 157# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
160# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 158# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 7ad28f6f5f1a..0c77cda9f5d8 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -1 +1,196 @@
1#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H
2#define _ASM_MICROBLAZE_PCI_BRIDGE_H
3#ifdef __KERNEL__
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
1#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/list.h>
12#include <linux/ioport.h>
13
14struct device_node;
15
16enum {
17 /* Force re-assigning all resources (ignore firmware
18 * setup completely)
19 */
20 PCI_REASSIGN_ALL_RSRC = 0x00000001,
21
22 /* Re-assign all bus numbers */
23 PCI_REASSIGN_ALL_BUS = 0x00000002,
24
25 /* Do not try to assign, just use existing setup */
26 PCI_PROBE_ONLY = 0x00000004,
27
28 /* Don't bother with ISA alignment unless the bridge has
29 * ISA forwarding enabled
30 */
31 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
32
33 /* Enable domain numbers in /proc */
34 PCI_ENABLE_PROC_DOMAINS = 0x00000010,
35 /* ... except for domain 0 */
36 PCI_COMPAT_DOMAIN_0 = 0x00000020,
37};
38
39/*
40 * Structure of a PCI controller (host bridge)
41 */
42struct pci_controller {
43 struct pci_bus *bus;
44 char is_dynamic;
45 struct device_node *dn;
46 struct list_head list_node;
47 struct device *parent;
48
49 int first_busno;
50 int last_busno;
51
52 int self_busno;
53
54 void __iomem *io_base_virt;
55 resource_size_t io_base_phys;
56
57 resource_size_t pci_io_size;
58
59 /* Some machines (PReP) have a non 1:1 mapping of
60 * the PCI memory space in the CPU bus space
61 */
62 resource_size_t pci_mem_offset;
63
64 /* Some machines have a special region to forward the ISA
65 * "memory" cycles such as VGA memory regions. Left to 0
66 * if unsupported
67 */
68 resource_size_t isa_mem_phys;
69 resource_size_t isa_mem_size;
70
71 struct pci_ops *ops;
72 unsigned int __iomem *cfg_addr;
73 void __iomem *cfg_data;
74
75 /*
76 * Used for variants of PCI indirect handling and possible quirks:
77 * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
78 * EXT_REG - provides access to PCI-e extended registers
79 * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS
80 * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
81 * to determine which bus number to match on when generating type0
82 * config cycles
83 * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
84 * hanging if we don't have link and try to do config cycles to
85 * anything but the PHB. Only allow talking to the PHB if this is
86 * set.
87 * BIG_ENDIAN - cfg_addr is a big endian register
88 * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs
89 * on the PLB4. Effectively disable MRM commands by setting this.
90 */
91#define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
92#define INDIRECT_TYPE_EXT_REG 0x00000002
93#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004
94#define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
95#define INDIRECT_TYPE_BIG_ENDIAN 0x00000010
96#define INDIRECT_TYPE_BROKEN_MRM 0x00000020
97 u32 indirect_type;
98
99 /* Currently, we limit ourselves to 1 IO range and 3 mem
100 * ranges since the common pci_bus structure can't handle more
101 */
102 struct resource io_resource;
103 struct resource mem_resources[3];
104 int global_number; /* PCI domain number */
105};
106
107static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
108{
109 return bus->sysdata;
110}
111
112static inline int isa_vaddr_is_ioport(void __iomem *address)
113{
114 /* No specific ISA handling on ppc32 at this stage, it
115 * all goes through PCI
116 */
117 return 0;
118}
119
120/* These are used for config access before all the PCI probing
121 has been done. */
122extern int early_read_config_byte(struct pci_controller *hose, int bus,
123 int dev_fn, int where, u8 *val);
124extern int early_read_config_word(struct pci_controller *hose, int bus,
125 int dev_fn, int where, u16 *val);
126extern int early_read_config_dword(struct pci_controller *hose, int bus,
127 int dev_fn, int where, u32 *val);
128extern int early_write_config_byte(struct pci_controller *hose, int bus,
129 int dev_fn, int where, u8 val);
130extern int early_write_config_word(struct pci_controller *hose, int bus,
131 int dev_fn, int where, u16 val);
132extern int early_write_config_dword(struct pci_controller *hose, int bus,
133 int dev_fn, int where, u32 val);
134
135extern int early_find_capability(struct pci_controller *hose, int bus,
136 int dev_fn, int cap);
137
138extern void setup_indirect_pci(struct pci_controller *hose,
139 resource_size_t cfg_addr,
140 resource_size_t cfg_data, u32 flags);
141
142/* Get the PCI host controller for an OF device */
143extern struct pci_controller *pci_find_hose_for_OF_device(
144 struct device_node *node);
145
146/* Fill up host controller resources from the OF node */
147extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
148 struct device_node *dev, int primary);
149
150/* Allocate & free a PCI host bridge structure */
151extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
152extern void pcibios_free_controller(struct pci_controller *phb);
153extern void pcibios_setup_phb_resources(struct pci_controller *hose);
154
155#ifdef CONFIG_PCI
156extern unsigned int pci_flags;
157
158static inline void pci_set_flags(int flags)
159{
160 pci_flags = flags;
161}
162
163static inline void pci_add_flags(int flags)
164{
165 pci_flags |= flags;
166}
167
168static inline int pci_has_flag(int flag)
169{
170 return pci_flags & flag;
171}
172
173extern struct list_head hose_list;
174
175extern unsigned long pci_address_to_pio(phys_addr_t address);
176extern int pcibios_vaddr_is_ioport(void __iomem *address);
177#else
178static inline unsigned long pci_address_to_pio(phys_addr_t address)
179{
180 return (unsigned long)-1;
181}
182static inline int pcibios_vaddr_is_ioport(void __iomem *address)
183{
184 return 0;
185}
186
187static inline void pci_set_flags(int flags) { }
188static inline void pci_add_flags(int flags) { }
189static inline int pci_has_flag(int flag)
190{
191 return 0;
192}
193#endif /* CONFIG_PCI */
194
195#endif /* __KERNEL__ */
196#endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 9f0df5faf2c8..bdd65aaee30d 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -1 +1,177 @@
1#include <asm-generic/pci.h> 1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Based on powerpc version
8 */
9
10#ifndef __ASM_MICROBLAZE_PCI_H
11#define __ASM_MICROBLAZE_PCI_H
12#ifdef __KERNEL__
13
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/string.h>
17#include <linux/dma-mapping.h>
18#include <linux/pci.h>
19
20#include <asm/scatterlist.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24
25#define PCIBIOS_MIN_IO 0x1000
26#define PCIBIOS_MIN_MEM 0x10000000
27
28struct pci_dev;
29
30/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
31#define IOBASE_BRIDGE_NUMBER 0
32#define IOBASE_MEMORY 1
33#define IOBASE_IO 2
34#define IOBASE_ISA_IO 3
35#define IOBASE_ISA_MEM 4
36
37#define pcibios_scan_all_fns(a, b) 0
38
39/*
40 * Set this to 1 if you want the kernel to re-assign all PCI
41 * bus numbers (don't do that on ppc64 yet !)
42 */
43#define pcibios_assign_all_busses() \
44 (pci_has_flag(PCI_REASSIGN_ALL_BUS))
45
46static inline void pcibios_set_master(struct pci_dev *dev)
47{
48 /* No special bus mastering setup handling */
49}
50
51static inline void pcibios_penalize_isa_irq(int irq, int active)
52{
53 /* We don't do dynamic PCI IRQ allocation */
54}
55
56#ifdef CONFIG_PCI
57extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
58extern struct dma_map_ops *get_pci_dma_ops(void);
59#else /* CONFIG_PCI */
60#define set_pci_dma_ops(d)
61#define get_pci_dma_ops() NULL
62#endif
63
64#ifdef CONFIG_PCI
65static inline void pci_dma_burst_advice(struct pci_dev *pdev,
66 enum pci_dma_burst_strategy *strat,
67 unsigned long *strategy_parameter)
68{
69 *strat = PCI_DMA_BURST_INFINITY;
70 *strategy_parameter = ~0UL;
71}
72#endif
73
74extern int pci_domain_nr(struct pci_bus *bus);
75
76/* Decide whether to display the domain number in /proc */
77extern int pci_proc_domain(struct pci_bus *bus);
78
79struct vm_area_struct;
80/* Map a range of PCI memory or I/O space for a device into user space */
81int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
82 enum pci_mmap_state mmap_state, int write_combine);
83
84/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
85#define HAVE_PCI_MMAP 1
86
87extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
88 size_t count);
89extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
90 size_t count);
91extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
92 struct vm_area_struct *vma,
93 enum pci_mmap_state mmap_state);
94
95#define HAVE_PCI_LEGACY 1
96
97/* pci_unmap_{page,single} is a nop so... */
98#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
99#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
100#define pci_unmap_addr(PTR, ADDR_NAME) (0)
101#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
102#define pci_unmap_len(PTR, LEN_NAME) (0)
103#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
104
105/* The PCI address space does equal the physical memory
106 * address space (no IOMMU). The IDE and SCSI device layers use
107 * this boolean for bounce buffer decisions.
108 */
109#define PCI_DMA_BUS_IS_PHYS (1)
110
111extern void pcibios_resource_to_bus(struct pci_dev *dev,
112 struct pci_bus_region *region,
113 struct resource *res);
114
115extern void pcibios_bus_to_resource(struct pci_dev *dev,
116 struct resource *res,
117 struct pci_bus_region *region);
118
119static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
120 struct resource *res)
121{
122 struct resource *root = NULL;
123
124 if (res->flags & IORESOURCE_IO)
125 root = &ioport_resource;
126 if (res->flags & IORESOURCE_MEM)
127 root = &iomem_resource;
128
129 return root;
130}
131
132extern void pcibios_claim_one_bus(struct pci_bus *b);
133
134extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
135
136extern void pcibios_resource_survey(void);
137
138extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
139extern int remove_phb_dynamic(struct pci_controller *phb);
140
141extern struct pci_dev *of_create_pci_dev(struct device_node *node,
142 struct pci_bus *bus, int devfn);
143
144extern void of_scan_pci_bridge(struct device_node *node,
145 struct pci_dev *dev);
146
147extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
148extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
149
150extern int pci_read_irq_line(struct pci_dev *dev);
151
152extern int pci_bus_find_capability(struct pci_bus *bus,
153 unsigned int devfn, int cap);
154
155struct file;
156extern pgprot_t pci_phys_mem_access_prot(struct file *file,
157 unsigned long pfn,
158 unsigned long size,
159 pgprot_t prot);
160
161#define HAVE_ARCH_PCI_RESOURCE_TO_USER
162extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
163 const struct resource *rsrc,
164 resource_size_t *start, resource_size_t *end);
165
166extern void pcibios_setup_bus_devices(struct pci_bus *bus);
167extern void pcibios_setup_bus_self(struct pci_bus *bus);
168
169/* This part of code was originaly in xilinx-pci.h */
170#ifdef CONFIG_PCI_XILINX
171extern void __init xilinx_pci_init(void);
172#else
173static inline void __init xilinx_pci_init(void) { return; }
174#endif
175
176#endif /* __KERNEL__ */
177#endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 7547f5064560..f44b0d696fe2 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -19,6 +19,7 @@
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/cache.h> 21#include <asm/cache.h>
22#include <asm/pgtable.h>
22 23
23#define PGDIR_ORDER 0 24#define PGDIR_ORDER 0
24 25
@@ -111,7 +112,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
111 unsigned long address) 112 unsigned long address)
112{ 113{
113 pte_t *pte; 114 pte_t *pte;
114 extern int mem_init_done;
115 extern void *early_get_page(void); 115 extern void *early_get_page(void);
116 if (mem_init_done) { 116 if (mem_init_done) {
117 pte = (pte_t *)__get_free_page(GFP_KERNEL | 117 pte = (pte_t *)__get_free_page(GFP_KERNEL |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index cc3a4dfc3eaa..dd2bb60651c7 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -16,6 +16,10 @@
16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
17 remap_pfn_range(vma, vaddr, pfn, size, prot) 17 remap_pfn_range(vma, vaddr, pfn, size, prot)
18 18
19#ifndef __ASSEMBLY__
20extern int mem_init_done;
21#endif
22
19#ifndef CONFIG_MMU 23#ifndef CONFIG_MMU
20 24
21#define pgd_present(pgd) (1) /* pages are always present on non MMU */ 25#define pgd_present(pgd) (1) /* pages are always present on non MMU */
@@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; }
51 55
52#define arch_enter_lazy_cpu_mode() do {} while (0) 56#define arch_enter_lazy_cpu_mode() do {} while (0)
53 57
58#define pgprot_noncached_wc(prot) prot
59
54#else /* CONFIG_MMU */ 60#else /* CONFIG_MMU */
55 61
56#include <asm-generic/4level-fixup.h> 62#include <asm-generic/4level-fixup.h>
@@ -68,7 +74,6 @@ static inline int pte_file(pte_t pte) { return 0; }
68 74
69extern unsigned long va_to_phys(unsigned long address); 75extern unsigned long va_to_phys(unsigned long address);
70extern pte_t *va_to_pte(unsigned long address); 76extern pte_t *va_to_pte(unsigned long address);
71extern unsigned long ioremap_bot, ioremap_base;
72 77
73/* 78/*
74 * The following only work if pte_present() is true. 79 * The following only work if pte_present() is true.
@@ -85,11 +90,25 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
85#define VMALLOC_START (CONFIG_KERNEL_START + \ 90#define VMALLOC_START (CONFIG_KERNEL_START + \
86 max(32 * 1024 * 1024UL, memory_size)) 91 max(32 * 1024 * 1024UL, memory_size))
87#define VMALLOC_END ioremap_bot 92#define VMALLOC_END ioremap_bot
88#define VMALLOC_VMADDR(x) ((unsigned long)(x))
89 93
90#endif /* __ASSEMBLY__ */ 94#endif /* __ASSEMBLY__ */
91 95
92/* 96/*
97 * Macro to mark a page protection value as "uncacheable".
98 */
99
100#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
101 _PAGE_WRITETHRU)
102
103#define pgprot_noncached(prot) \
104 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
105 _PAGE_NO_CACHE | _PAGE_GUARDED))
106
107#define pgprot_noncached_wc(prot) \
108 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
109 _PAGE_NO_CACHE))
110
111/*
93 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash 112 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
94 * table containing PTEs, together with a set of 16 segment registers, to 113 * table containing PTEs, together with a set of 16 segment registers, to
95 * define the virtual to physical address mapping. 114 * define the virtual to physical address mapping.
@@ -397,7 +416,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
397 mts rmsr, %2\n\ 416 mts rmsr, %2\n\
398 nop" 417 nop"
399 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) 418 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
400 : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) 419 : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
401 : "cc"); 420 : "cc");
402 421
403 return old; 422 return old;
@@ -566,18 +585,11 @@ void mapin_ram(void);
566int map_page(unsigned long va, phys_addr_t pa, int flags); 585int map_page(unsigned long va, phys_addr_t pa, int flags);
567 586
568extern int mem_init_done; 587extern int mem_init_done;
569extern unsigned long ioremap_base;
570extern unsigned long ioremap_bot;
571 588
572asmlinkage void __init mmu_init(void); 589asmlinkage void __init mmu_init(void);
573 590
574void __init *early_get_page(void); 591void __init *early_get_page(void);
575 592
576void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
577void consistent_free(void *vaddr);
578void consistent_sync(void *vaddr, size_t size, int direction);
579void consistent_sync_page(struct page *page, unsigned long offset,
580 size_t size, int direction);
581#endif /* __ASSEMBLY__ */ 593#endif /* __ASSEMBLY__ */
582#endif /* __KERNEL__ */ 594#endif /* __KERNEL__ */
583 595
@@ -586,6 +598,14 @@ void consistent_sync_page(struct page *page, unsigned long offset,
586#ifndef __ASSEMBLY__ 598#ifndef __ASSEMBLY__
587#include <asm-generic/pgtable.h> 599#include <asm-generic/pgtable.h>
588 600
601extern unsigned long ioremap_bot, ioremap_base;
602
603void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
604void consistent_free(void *vaddr);
605void consistent_sync(void *vaddr, size_t size, int direction);
606void consistent_sync_page(struct page *page, unsigned long offset,
607 size_t size, int direction);
608
589void setup_memory(void); 609void setup_memory(void);
590#endif /* __ASSEMBLY__ */ 610#endif /* __ASSEMBLY__ */
591 611
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 03f45a963204..e7d67a329bd7 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -31,6 +31,21 @@
31/* Other Prototypes */ 31/* Other Prototypes */
32extern int early_uartlite_console(void); 32extern int early_uartlite_console(void);
33 33
34#ifdef CONFIG_PCI
35/*
36 * PCI <-> OF matching functions
37 * (XXX should these be here?)
38 */
39struct pci_bus;
40struct pci_dev;
41extern int pci_device_from_OF_node(struct device_node *node,
42 u8 *bus, u8 *devfn);
43extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
44 int devfn);
45extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
46extern void pci_create_OF_bus_map(void);
47#endif
48
34/* 49/*
35 * OF address retreival & translation 50 * OF address retreival & translation
36 */ 51 */
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 88fc92cdd8ce..48c4f0335e3f 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -88,6 +88,9 @@ void free_initmem(void);
88extern char *klimit; 88extern char *klimit;
89extern void ret_from_fork(void); 89extern void ret_from_fork(void);
90 90
91extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
92extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
93
91#ifdef CONFIG_DEBUG_FS 94#ifdef CONFIG_DEBUG_FS
92extern struct dentry *of_debugfs_root; 95extern struct dentry *of_debugfs_root;
93#endif 96#endif
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index 10ec70cd8735..bcb8b41d55af 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -23,7 +23,7 @@
23extern void _tlbie(unsigned long address); 23extern void _tlbie(unsigned long address);
24extern void _tlbia(void); 24extern void _tlbia(void);
25 25
26#define __tlbia() _tlbia() 26#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
27 27
28static inline void local_flush_tlb_all(void) 28static inline void local_flush_tlb_all(void)
29 { __tlbia(); } 29 { __tlbia(); }
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index b07594eccf9b..e51bc1520825 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -14,7 +14,7 @@ endif
14 14
15extra-y := head.o vmlinux.lds 15extra-y := head.o vmlinux.lds
16 16
17obj-y += exceptions.o \ 17obj-y += dma.o exceptions.o \
18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ 18 hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
19 of_platform.o process.o prom.o prom_parse.o ptrace.o \ 19 of_platform.o process.o prom.o prom_parse.o ptrace.o \
20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o 20 setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 7bc7b68f97db..0071260a672c 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -90,6 +90,7 @@ int main(int argc, char *argv[])
90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 90 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); 91 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); 92 DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
93 DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
93 BLANK(); 94 BLANK();
94 95
95 /* struct cpu_context */ 96 /* struct cpu_context */
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 2a56bccce4e0..f04d8a86dead 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -15,25 +15,6 @@
15#include <asm/cpuinfo.h> 15#include <asm/cpuinfo.h>
16#include <asm/pvr.h> 16#include <asm/pvr.h>
17 17
18static inline void __invalidate_flush_icache(unsigned int addr)
19{
20 __asm__ __volatile__ ("wic %0, r0;" \
21 : : "r" (addr));
22}
23
24static inline void __flush_dcache(unsigned int addr)
25{
26 __asm__ __volatile__ ("wdc.flush %0, r0;" \
27 : : "r" (addr));
28}
29
30static inline void __invalidate_dcache(unsigned int baseaddr,
31 unsigned int offset)
32{
33 __asm__ __volatile__ ("wdc.clear %0, %1;" \
34 : : "r" (baseaddr), "r" (offset));
35}
36
37static inline void __enable_icache_msr(void) 18static inline void __enable_icache_msr(void)
38{ 19{
39 __asm__ __volatile__ (" msrset r0, %0; \ 20 __asm__ __volatile__ (" msrset r0, %0; \
@@ -148,9 +129,9 @@ do { \
148 int step = -line_length; \ 129 int step = -line_length; \
149 BUG_ON(step >= 0); \ 130 BUG_ON(step >= 0); \
150 \ 131 \
151 __asm__ __volatile__ (" 1: " #op " r0, %0; \ 132 __asm__ __volatile__ (" 1: " #op " r0, %0; \
152 bgtid %0, 1b; \ 133 bgtid %0, 1b; \
153 addk %0, %0, %1; \ 134 addk %0, %0, %1; \
154 " : : "r" (len), "r" (step) \ 135 " : : "r" (len), "r" (step) \
155 : "memory"); \ 136 : "memory"); \
156} while (0); 137} while (0);
@@ -162,9 +143,9 @@ do { \
162 int count = end - start; \ 143 int count = end - start; \
163 BUG_ON(count <= 0); \ 144 BUG_ON(count <= 0); \
164 \ 145 \
165 __asm__ __volatile__ (" 1: " #op " %0, %1; \ 146 __asm__ __volatile__ (" 1: " #op " %0, %1; \
166 bgtid %1, 1b; \ 147 bgtid %1, 1b; \
167 addk %1, %1, %2; \ 148 addk %1, %1, %2; \
168 " : : "r" (start), "r" (count), \ 149 " : : "r" (start), "r" (count), \
169 "r" (step) : "memory"); \ 150 "r" (step) : "memory"); \
170} while (0); 151} while (0);
@@ -175,7 +156,7 @@ do { \
175 int volatile temp; \ 156 int volatile temp; \
176 BUG_ON(end - start <= 0); \ 157 BUG_ON(end - start <= 0); \
177 \ 158 \
178 __asm__ __volatile__ (" 1: " #op " %1, r0; \ 159 __asm__ __volatile__ (" 1: " #op " %1, r0; \
179 cmpu %0, %1, %2; \ 160 cmpu %0, %1, %2; \
180 bgtid %0, 1b; \ 161 bgtid %0, 1b; \
181 addk %1, %1, %3; \ 162 addk %1, %1, %3; \
@@ -183,10 +164,14 @@ do { \
183 "r" (line_length) : "memory"); \ 164 "r" (line_length) : "memory"); \
184} while (0); 165} while (0);
185 166
167#define ASM_LOOP
168
186static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 169static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
187{ 170{
188 unsigned long flags; 171 unsigned long flags;
189 172#ifndef ASM_LOOP
173 int i;
174#endif
190 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 175 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
191 (unsigned int)start, (unsigned int) end); 176 (unsigned int)start, (unsigned int) end);
192 177
@@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
196 local_irq_save(flags); 181 local_irq_save(flags);
197 __disable_icache_msr(); 182 __disable_icache_msr();
198 183
184#ifdef ASM_LOOP
199 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 185 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
200 186#else
187 for (i = start; i < end; i += cpuinfo.icache_line_length)
188 __asm__ __volatile__ ("wic %0, r0;" \
189 : : "r" (i));
190#endif
201 __enable_icache_msr(); 191 __enable_icache_msr();
202 local_irq_restore(flags); 192 local_irq_restore(flags);
203} 193}
@@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
206 unsigned long end) 196 unsigned long end)
207{ 197{
208 unsigned long flags; 198 unsigned long flags;
209 199#ifndef ASM_LOOP
200 int i;
201#endif
210 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 202 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
211 (unsigned int)start, (unsigned int) end); 203 (unsigned int)start, (unsigned int) end);
212 204
@@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
216 local_irq_save(flags); 208 local_irq_save(flags);
217 __disable_icache_nomsr(); 209 __disable_icache_nomsr();
218 210
211#ifdef ASM_LOOP
219 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 212 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
213#else
214 for (i = start; i < end; i += cpuinfo.icache_line_length)
215 __asm__ __volatile__ ("wic %0, r0;" \
216 : : "r" (i));
217#endif
220 218
221 __enable_icache_nomsr(); 219 __enable_icache_nomsr();
222 local_irq_restore(flags); 220 local_irq_restore(flags);
@@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
225static void __flush_icache_range_noirq(unsigned long start, 223static void __flush_icache_range_noirq(unsigned long start,
226 unsigned long end) 224 unsigned long end)
227{ 225{
226#ifndef ASM_LOOP
227 int i;
228#endif
228 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 229 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
229 (unsigned int)start, (unsigned int) end); 230 (unsigned int)start, (unsigned int) end);
230 231
231 CACHE_LOOP_LIMITS(start, end, 232 CACHE_LOOP_LIMITS(start, end,
232 cpuinfo.icache_line_length, cpuinfo.icache_size); 233 cpuinfo.icache_line_length, cpuinfo.icache_size);
234#ifdef ASM_LOOP
233 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); 235 CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
236#else
237 for (i = start; i < end; i += cpuinfo.icache_line_length)
238 __asm__ __volatile__ ("wic %0, r0;" \
239 : : "r" (i));
240#endif
234} 241}
235 242
236static void __flush_icache_all_msr_irq(void) 243static void __flush_icache_all_msr_irq(void)
237{ 244{
238 unsigned long flags; 245 unsigned long flags;
239 246#ifndef ASM_LOOP
247 int i;
248#endif
240 pr_debug("%s\n", __func__); 249 pr_debug("%s\n", __func__);
241 250
242 local_irq_save(flags); 251 local_irq_save(flags);
243 __disable_icache_msr(); 252 __disable_icache_msr();
244 253#ifdef ASM_LOOP
245 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 254 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
246 255#else
256 for (i = 0; i < cpuinfo.icache_size;
257 i += cpuinfo.icache_line_length)
258 __asm__ __volatile__ ("wic %0, r0;" \
259 : : "r" (i));
260#endif
247 __enable_icache_msr(); 261 __enable_icache_msr();
248 local_irq_restore(flags); 262 local_irq_restore(flags);
249} 263}
@@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
251static void __flush_icache_all_nomsr_irq(void) 265static void __flush_icache_all_nomsr_irq(void)
252{ 266{
253 unsigned long flags; 267 unsigned long flags;
254 268#ifndef ASM_LOOP
269 int i;
270#endif
255 pr_debug("%s\n", __func__); 271 pr_debug("%s\n", __func__);
256 272
257 local_irq_save(flags); 273 local_irq_save(flags);
258 __disable_icache_nomsr(); 274 __disable_icache_nomsr();
259 275#ifdef ASM_LOOP
260 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 276 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
261 277#else
278 for (i = 0; i < cpuinfo.icache_size;
279 i += cpuinfo.icache_line_length)
280 __asm__ __volatile__ ("wic %0, r0;" \
281 : : "r" (i));
282#endif
262 __enable_icache_nomsr(); 283 __enable_icache_nomsr();
263 local_irq_restore(flags); 284 local_irq_restore(flags);
264} 285}
265 286
266static void __flush_icache_all_noirq(void) 287static void __flush_icache_all_noirq(void)
267{ 288{
289#ifndef ASM_LOOP
290 int i;
291#endif
268 pr_debug("%s\n", __func__); 292 pr_debug("%s\n", __func__);
293#ifdef ASM_LOOP
269 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); 294 CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
295#else
296 for (i = 0; i < cpuinfo.icache_size;
297 i += cpuinfo.icache_line_length)
298 __asm__ __volatile__ ("wic %0, r0;" \
299 : : "r" (i));
300#endif
270} 301}
271 302
272static void __invalidate_dcache_all_msr_irq(void) 303static void __invalidate_dcache_all_msr_irq(void)
273{ 304{
274 unsigned long flags; 305 unsigned long flags;
275 306#ifndef ASM_LOOP
307 int i;
308#endif
276 pr_debug("%s\n", __func__); 309 pr_debug("%s\n", __func__);
277 310
278 local_irq_save(flags); 311 local_irq_save(flags);
279 __disable_dcache_msr(); 312 __disable_dcache_msr();
280 313#ifdef ASM_LOOP
281 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 314 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
282 315#else
316 for (i = 0; i < cpuinfo.dcache_size;
317 i += cpuinfo.dcache_line_length)
318 __asm__ __volatile__ ("wdc %0, r0;" \
319 : : "r" (i));
320#endif
283 __enable_dcache_msr(); 321 __enable_dcache_msr();
284 local_irq_restore(flags); 322 local_irq_restore(flags);
285} 323}
@@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
287static void __invalidate_dcache_all_nomsr_irq(void) 325static void __invalidate_dcache_all_nomsr_irq(void)
288{ 326{
289 unsigned long flags; 327 unsigned long flags;
290 328#ifndef ASM_LOOP
329 int i;
330#endif
291 pr_debug("%s\n", __func__); 331 pr_debug("%s\n", __func__);
292 332
293 local_irq_save(flags); 333 local_irq_save(flags);
294 __disable_dcache_nomsr(); 334 __disable_dcache_nomsr();
295 335#ifdef ASM_LOOP
296 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); 336 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
297 337#else
338 for (i = 0; i < cpuinfo.dcache_size;
339 i += cpuinfo.dcache_line_length)
340 __asm__ __volatile__ ("wdc %0, r0;" \
341 : : "r" (i));
342#endif
298 __enable_dcache_nomsr(); 343 __enable_dcache_nomsr();
299 local_irq_restore(flags); 344 local_irq_restore(flags);
300} 345}
301 346
302static void __invalidate_dcache_all_noirq_wt(void) 347static void __invalidate_dcache_all_noirq_wt(void)
303{ 348{
349#ifndef ASM_LOOP
350 int i;
351#endif
304 pr_debug("%s\n", __func__); 352 pr_debug("%s\n", __func__);
353#ifdef ASM_LOOP
305 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) 354 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
355#else
356 for (i = 0; i < cpuinfo.dcache_size;
357 i += cpuinfo.dcache_line_length)
358 __asm__ __volatile__ ("wdc %0, r0;" \
359 : : "r" (i));
360#endif
306} 361}
307 362
308/* FIXME this is weird - should be only wdc but not work 363/* FIXME this is weird - should be only wdc but not work
309 * MS: I am getting bus errors and other weird things */ 364 * MS: I am getting bus errors and other weird things */
310static void __invalidate_dcache_all_wb(void) 365static void __invalidate_dcache_all_wb(void)
311{ 366{
367#ifndef ASM_LOOP
368 int i;
369#endif
312 pr_debug("%s\n", __func__); 370 pr_debug("%s\n", __func__);
371#ifdef ASM_LOOP
313 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 372 CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
314 wdc.clear) 373 wdc.clear)
374#else
375 for (i = 0; i < cpuinfo.dcache_size;
376 i += cpuinfo.dcache_line_length)
377 __asm__ __volatile__ ("wdc.clear %0, r0;" \
378 : : "r" (i));
379#endif
315} 380}
316 381
317static void __invalidate_dcache_range_wb(unsigned long start, 382static void __invalidate_dcache_range_wb(unsigned long start,
318 unsigned long end) 383 unsigned long end)
319{ 384{
385#ifndef ASM_LOOP
386 int i;
387#endif
320 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 388 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
321 (unsigned int)start, (unsigned int) end); 389 (unsigned int)start, (unsigned int) end);
322 390
323 CACHE_LOOP_LIMITS(start, end, 391 CACHE_LOOP_LIMITS(start, end,
324 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 392 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
393#ifdef ASM_LOOP
325 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); 394 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
395#else
396 for (i = start; i < end; i += cpuinfo.icache_line_length)
397 __asm__ __volatile__ ("wdc.clear %0, r0;" \
398 : : "r" (i));
399#endif
326} 400}
327 401
328static void __invalidate_dcache_range_nomsr_wt(unsigned long start, 402static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
329 unsigned long end) 403 unsigned long end)
330{ 404{
405#ifndef ASM_LOOP
406 int i;
407#endif
331 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 408 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
332 (unsigned int)start, (unsigned int) end); 409 (unsigned int)start, (unsigned int) end);
333 CACHE_LOOP_LIMITS(start, end, 410 CACHE_LOOP_LIMITS(start, end,
334 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 411 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
335 412
413#ifdef ASM_LOOP
336 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 414 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
415#else
416 for (i = start; i < end; i += cpuinfo.icache_line_length)
417 __asm__ __volatile__ ("wdc %0, r0;" \
418 : : "r" (i));
419#endif
337} 420}
338 421
339static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, 422static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
340 unsigned long end) 423 unsigned long end)
341{ 424{
342 unsigned long flags; 425 unsigned long flags;
343 426#ifndef ASM_LOOP
427 int i;
428#endif
344 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 429 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
345 (unsigned int)start, (unsigned int) end); 430 (unsigned int)start, (unsigned int) end);
346 CACHE_LOOP_LIMITS(start, end, 431 CACHE_LOOP_LIMITS(start, end,
@@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
349 local_irq_save(flags); 434 local_irq_save(flags);
350 __disable_dcache_msr(); 435 __disable_dcache_msr();
351 436
437#ifdef ASM_LOOP
352 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 438 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
439#else
440 for (i = start; i < end; i += cpuinfo.icache_line_length)
441 __asm__ __volatile__ ("wdc %0, r0;" \
442 : : "r" (i));
443#endif
353 444
354 __enable_dcache_msr(); 445 __enable_dcache_msr();
355 local_irq_restore(flags); 446 local_irq_restore(flags);
@@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
359 unsigned long end) 450 unsigned long end)
360{ 451{
361 unsigned long flags; 452 unsigned long flags;
362 453#ifndef ASM_LOOP
454 int i;
455#endif
363 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 456 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
364 (unsigned int)start, (unsigned int) end); 457 (unsigned int)start, (unsigned int) end);
365 458
@@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
369 local_irq_save(flags); 462 local_irq_save(flags);
370 __disable_dcache_nomsr(); 463 __disable_dcache_nomsr();
371 464
465#ifdef ASM_LOOP
372 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); 466 CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
467#else
468 for (i = start; i < end; i += cpuinfo.icache_line_length)
469 __asm__ __volatile__ ("wdc %0, r0;" \
470 : : "r" (i));
471#endif
373 472
374 __enable_dcache_nomsr(); 473 __enable_dcache_nomsr();
375 local_irq_restore(flags); 474 local_irq_restore(flags);
@@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
377 476
378static void __flush_dcache_all_wb(void) 477static void __flush_dcache_all_wb(void)
379{ 478{
479#ifndef ASM_LOOP
480 int i;
481#endif
380 pr_debug("%s\n", __func__); 482 pr_debug("%s\n", __func__);
483#ifdef ASM_LOOP
381 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, 484 CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
382 wdc.flush); 485 wdc.flush);
486#else
487 for (i = 0; i < cpuinfo.dcache_size;
488 i += cpuinfo.dcache_line_length)
489 __asm__ __volatile__ ("wdc.flush %0, r0;" \
490 : : "r" (i));
491#endif
383} 492}
384 493
385static void __flush_dcache_range_wb(unsigned long start, unsigned long end) 494static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
386{ 495{
496#ifndef ASM_LOOP
497 int i;
498#endif
387 pr_debug("%s: start 0x%x, end 0x%x\n", __func__, 499 pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
388 (unsigned int)start, (unsigned int) end); 500 (unsigned int)start, (unsigned int) end);
389 501
390 CACHE_LOOP_LIMITS(start, end, 502 CACHE_LOOP_LIMITS(start, end,
391 cpuinfo.dcache_line_length, cpuinfo.dcache_size); 503 cpuinfo.dcache_line_length, cpuinfo.dcache_size);
504#ifdef ASM_LOOP
392 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); 505 CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
506#else
507 for (i = start; i < end; i += cpuinfo.icache_line_length)
508 __asm__ __volatile__ ("wdc.flush %0, r0;" \
509 : : "r" (i));
510#endif
393} 511}
394 512
395/* struct for wb caches and for wt caches */ 513/* struct for wb caches and for wt caches */
@@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
493#define CPUVER_7_20_A 0x0c 611#define CPUVER_7_20_A 0x0c
494#define CPUVER_7_20_D 0x0f 612#define CPUVER_7_20_D 0x0f
495 613
496#define INFO(s) printk(KERN_INFO "cache: " s " \n"); 614#define INFO(s) printk(KERN_INFO "cache: " s "\n");
497 615
498void microblaze_cache_init(void) 616void microblaze_cache_init(void)
499{ 617{
@@ -532,4 +650,9 @@ void microblaze_cache_init(void)
532 } 650 }
533 } 651 }
534 } 652 }
653 invalidate_dcache();
654 enable_dcache();
655
656 invalidate_icache();
657 enable_icache();
535} 658}
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000000..b1084974fccd
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2009-2010 PetaLogix
3 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
4 *
5 * Provide default implementations of the DMA mapping callbacks for
6 * directly mapped busses.
7 */
8
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/dma-debug.h>
12#include <asm/bug.h>
13#include <asm/cacheflush.h>
14
15/*
16 * Generic direct DMA implementation
17 *
18 * This implementation supports a per-device offset that can be applied if
19 * the address at which memory is visible to devices is not 0. Platform code
20 * can set archdata.dma_data to an unsigned long holding the offset. By
21 * default the offset is PCI_DRAM_OFFSET.
22 */
23static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
24 size_t size, enum dma_data_direction direction)
25{
26 switch (direction) {
27 case DMA_TO_DEVICE:
28 flush_dcache_range(paddr + offset, paddr + offset + size);
29 break;
30 case DMA_FROM_DEVICE:
31 invalidate_dcache_range(paddr + offset, paddr + offset + size);
32 break;
33 default:
34 BUG();
35 }
36}
37
38static unsigned long get_dma_direct_offset(struct device *dev)
39{
40 if (dev)
41 return (unsigned long)dev->archdata.dma_data;
42
43 return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
44}
45
46#define NOT_COHERENT_CACHE
47
/*
 * Allocate a coherent DMA buffer.
 *
 * With NOT_COHERENT_CACHE defined (always, above) the buffer comes from
 * consistent_alloc(), which returns uncached memory.  The dead #else
 * branch is the plain page-allocator path for genuinely coherent systems.
 * Returns the kernel virtual address, with *dma_handle set to the bus
 * address, or NULL on failure.
 */
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	/* coherent buffers are handed out zeroed */
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
71
/*
 * Free a buffer obtained from dma_direct_alloc_coherent().  The release
 * path must match the allocation path: consistent_free() for the
 * NOT_COHERENT_CACHE case, plain free_pages() otherwise.
 */
static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
81
/*
 * Map a scatter/gather list for streaming DMA.
 *
 * For each entry, the bus address is the entry's physical address plus
 * the per-device offset, and the cache is synchronized for @direction.
 * Always returns @nents; this direct implementation cannot fail.
 */
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		/* sg_phys() == page_to_phys(sg_page()) + sg->offset, so
		 * this syncs the same region the dma_address refers to */
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
							sg->length, direction);
	}

	return nents;
}
99
/*
 * Unmap a scatter/gather list.  Intentionally a no-op: there is no IOMMU
 * state to tear down for directly mapped busses.
 *
 * NOTE(review): no cache maintenance happens here, so DMA_FROM_DEVICE
 * buffers rely entirely on the sync done at map time - confirm intended.
 */
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}
105
/* Direct mapping imposes no addressing restrictions: accept any DMA mask. */
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
110
111static inline dma_addr_t dma_direct_map_page(struct device *dev,
112 struct page *page,
113 unsigned long offset,
114 size_t size,
115 enum dma_data_direction direction,
116 struct dma_attrs *attrs)
117{
118 __dma_sync_page(page_to_phys(page), offset, size, direction);
119 return page_to_phys(page) + offset + get_dma_direct_offset(dev);
120}
121
/*
 * Unmap a single page previously mapped with dma_direct_map_page().
 */
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
/* dma_address already carries a physical address (map_page added only
 * the per-device offset), so it can be passed straight to
 * __dma_sync_page, which expects a physical base.
 *
 * NOTE(review): syncing again on unmap matters for DMA_FROM_DEVICE;
 * for DMA_TO_DEVICE this extra writeback looks redundant - confirm.
 */
	__dma_sync_page(dma_address, 0 , size, direction);
}
135
/* DMA operations table for directly mapped (non-IOMMU) busses. */
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
146
147/* Number of entries preallocated for DMA-API debugging */
148#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
149
/* Register this arch with the DMA-API debugging infrastructure. */
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 3bad4ff49471..c0ede25c5b99 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
305 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 305 swi r11, r1, PTO+PT_R1; /* Store user SP. */
306 addi r11, r0, 1; 306 addi r11, r0, 1;
307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 307 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
3082: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 3082: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
309 /* Save away the syscall number. */ 309 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0; 310 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1) 311 tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
322 rtid r11, 0 322 rtid r11, 0
323 nop 323 nop
3243: 3243:
325 add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ 325 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
326 lwi r11, r11, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */ 326 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK 327 andi r11, r11, _TIF_WORK_SYSCALL_MASK
329 beqi r11, 4f 328 beqi r11, 4f
@@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
382/* See if returning to kernel mode, if so, skip resched &c. */ 381/* See if returning to kernel mode, if so, skip resched &c. */
383 bnei r11, 2f; 382 bnei r11, 2f;
384 383
384 swi r3, r1, PTO + PT_R3
385 swi r4, r1, PTO + PT_R4
386
385 /* We're returning to user mode, so check for various conditions that 387 /* We're returning to user mode, so check for various conditions that
386 * trigger rescheduling. */ 388 * trigger rescheduling. */
387 # FIXME: Restructure all these flag checks. 389 /* FIXME: Restructure all these flag checks. */
388 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 390 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
389 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 391 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK 392 andi r11, r11, _TIF_WORK_SYSCALL_MASK
392 beqi r11, 1f 393 beqi r11, 1f
393 394
394 swi r3, r1, PTO + PT_R3
395 swi r4, r1, PTO + PT_R4
396 brlid r15, do_syscall_trace_leave 395 brlid r15, do_syscall_trace_leave
397 addik r5, r1, PTO + PT_R0 396 addik r5, r1, PTO + PT_R0
398 lwi r3, r1, PTO + PT_R3
399 lwi r4, r1, PTO + PT_R4
4001: 3971:
401
402 /* We're returning to user mode, so check for various conditions that 398 /* We're returning to user mode, so check for various conditions that
403 * trigger rescheduling. */ 399 * trigger rescheduling. */
404 /* Get current task ptr into r11 */ 400 /* get thread info from current task */
405 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 401 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
406 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
407 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 402 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
408 andi r11, r11, _TIF_NEED_RESCHED; 403 andi r11, r11, _TIF_NEED_RESCHED;
409 beqi r11, 5f; 404 beqi r11, 5f;
410 405
411 swi r3, r1, PTO + PT_R3; /* store syscall result */
412 swi r4, r1, PTO + PT_R4;
413 bralid r15, schedule; /* Call scheduler */ 406 bralid r15, schedule; /* Call scheduler */
414 nop; /* delay slot */ 407 nop; /* delay slot */
415 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
416 lwi r4, r1, PTO + PT_R4;
417 408
418 /* Maybe handle a signal */ 409 /* Maybe handle a signal */
4195: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 4105: /* get thread info from current task*/
420 lwi r11, r11, TS_THREAD_INFO; /* get thread info */ 411 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
421 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 412 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
422 andi r11, r11, _TIF_SIGPENDING; 413 andi r11, r11, _TIF_SIGPENDING;
423 beqi r11, 1f; /* Signals to handle, handle them */ 414 beqi r11, 1f; /* Signals to handle, handle them */
424 415
425 swi r3, r1, PTO + PT_R3; /* store syscall result */
426 swi r4, r1, PTO + PT_R4;
427 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 416 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
428 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
429 addi r7, r0, 1; /* Arg 3: int in_syscall */ 417 addi r7, r0, 1; /* Arg 3: int in_syscall */
430 bralid r15, do_signal; /* Handle any signals */ 418 bralid r15, do_signal; /* Handle any signals */
431 nop; 419 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
420
421/* Finally, return to user state. */
4221:
432 lwi r3, r1, PTO + PT_R3; /* restore syscall result */ 423 lwi r3, r1, PTO + PT_R3; /* restore syscall result */
433 lwi r4, r1, PTO + PT_R4; 424 lwi r4, r1, PTO + PT_R4;
434 425
435/* Finally, return to user state. */ 426 swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
4361: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 427 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
437 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
438 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
439 VM_OFF; 428 VM_OFF;
440 tophys(r1,r1); 429 tophys(r1,r1);
441 RESTORE_REGS; 430 RESTORE_REGS;
@@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
565 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ 554 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
566 addi r11, r0, 1; \ 555 addi r11, r0, 1; \
567 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ 556 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5682: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ 5572: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
569 /* Save away the syscall number. */ \ 558 /* Save away the syscall number. */ \
570 swi r0, r1, PTO+PT_R0; \ 559 swi r0, r1, PTO+PT_R0; \
571 tovirt(r1,r1) 560 tovirt(r1,r1)
@@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
673 662
674 /* We're returning to user mode, so check for various conditions that 663 /* We're returning to user mode, so check for various conditions that
675 trigger rescheduling. */ 664 trigger rescheduling. */
676 /* Get current task ptr into r11 */ 665 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
677 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
678 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
679 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 666 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
680 andi r11, r11, _TIF_NEED_RESCHED; 667 andi r11, r11, _TIF_NEED_RESCHED;
681 beqi r11, 5f; 668 beqi r11, 5f;
@@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
685 nop; /* delay slot */ 672 nop; /* delay slot */
686 673
687 /* Maybe handle a signal */ 674 /* Maybe handle a signal */
6885: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 6755: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
689 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
690 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 676 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
691 andi r11, r11, _TIF_SIGPENDING; 677 andi r11, r11, _TIF_SIGPENDING;
692 beqi r11, 1f; /* Signals to handle, handle them */ 678 beqi r11, 1f; /* Signals to handle, handle them */
@@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
705 * store return registers separately because this macros is use 691 * store return registers separately because this macros is use
706 * for others exceptions */ 692 * for others exceptions */
707 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 693 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
708 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
709 addi r7, r0, 0; /* Arg 3: int in_syscall */ 694 addi r7, r0, 0; /* Arg 3: int in_syscall */
710 bralid r15, do_signal; /* Handle any signals */ 695 bralid r15, do_signal; /* Handle any signals */
711 nop; 696 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
712 697
713/* Finally, return to user state. */ 698/* Finally, return to user state. */
7141: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 6991: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
715 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 700 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
716 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
717 VM_OFF; 701 VM_OFF;
718 tophys(r1,r1); 702 tophys(r1,r1);
719 703
@@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
802 swi r11, r0, TOPHYS(PER_CPU(KM)); 786 swi r11, r0, TOPHYS(PER_CPU(KM));
803 787
8042: 7882:
805 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 789 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
806 swi r0, r1, PTO + PT_R0; 790 swi r0, r1, PTO + PT_R0;
807 tovirt(r1,r1) 791 tovirt(r1,r1)
808 la r5, r1, PTO; 792 la r5, r1, PTO;
@@ -817,8 +801,7 @@ ret_from_irq:
817 lwi r11, r1, PTO + PT_MODE; 801 lwi r11, r1, PTO + PT_MODE;
818 bnei r11, 2f; 802 bnei r11, 2f;
819 803
820 add r11, r0, CURRENT_TASK; 804 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
821 lwi r11, r11, TS_THREAD_INFO;
822 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ 805 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
823 andi r11, r11, _TIF_NEED_RESCHED; 806 andi r11, r11, _TIF_NEED_RESCHED;
824 beqi r11, 5f 807 beqi r11, 5f
@@ -826,8 +809,7 @@ ret_from_irq:
826 nop; /* delay slot */ 809 nop; /* delay slot */
827 810
828 /* Maybe handle a signal */ 811 /* Maybe handle a signal */
8295: add r11, r0, CURRENT_TASK; 8125: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
830 lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
831 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 813 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
832 andi r11, r11, _TIF_SIGPENDING; 814 andi r11, r11, _TIF_SIGPENDING;
833 beqid r11, no_intr_resched 815 beqid r11, no_intr_resched
@@ -842,8 +824,7 @@ no_intr_resched:
842 /* Disable interrupts, we are now committed to the state restore */ 824 /* Disable interrupts, we are now committed to the state restore */
843 disable_irq 825 disable_irq
844 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ 826 swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
845 add r11, r0, CURRENT_TASK; 827 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
846 swi r11, r0, PER_CPU(CURRENT_SAVE);
847 VM_OFF; 828 VM_OFF;
848 tophys(r1,r1); 829 tophys(r1,r1);
849 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 830 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@@ -853,7 +834,28 @@ no_intr_resched:
853 lwi r1, r1, PT_R1 - PT_SIZE; 834 lwi r1, r1, PT_R1 - PT_SIZE;
854 bri 6f; 835 bri 6f;
855/* MS: Return to kernel state. */ 836/* MS: Return to kernel state. */
8562: VM_OFF /* MS: turn off MMU */ 8372:
838#ifdef CONFIG_PREEMPT
839 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
840 /* MS: get preempt_count from thread info */
841 lwi r5, r11, TI_PREEMPT_COUNT;
842 bgti r5, restore;
843
844 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
845 andi r5, r5, _TIF_NEED_RESCHED;
846 beqi r5, restore /* if zero jump over */
847
848preempt:
849 /* interrupts are off that's why I am calling preempt_chedule_irq */
850 bralid r15, preempt_schedule_irq
851 nop
852 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
853 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
854 andi r5, r5, _TIF_NEED_RESCHED;
855 bnei r5, preempt /* if non zero jump to resched */
856restore:
857#endif
858 VM_OFF /* MS: turn off MMU */
857 tophys(r1,r1) 859 tophys(r1,r1)
858 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ 860 lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
859 lwi r4, r1, PTO + PT_R4; 861 lwi r4, r1, PTO + PT_R4;
@@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
915 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 917 swi r11, r1, PTO+PT_R1; /* Store user SP. */
916 addi r11, r0, 1; 918 addi r11, r0, 1;
917 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ 919 swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
9182: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 9202: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
919 /* Save away the syscall number. */ 921 /* Save away the syscall number. */
920 swi r0, r1, PTO+PT_R0; 922 swi r0, r1, PTO+PT_R0;
921 tovirt(r1,r1) 923 tovirt(r1,r1)
@@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
935 bnei r11, 2f; 937 bnei r11, 2f;
936 938
937 /* Get current task ptr into r11 */ 939 /* Get current task ptr into r11 */
938 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 940 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
939 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
940 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 941 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
941 andi r11, r11, _TIF_NEED_RESCHED; 942 andi r11, r11, _TIF_NEED_RESCHED;
942 beqi r11, 5f; 943 beqi r11, 5f;
@@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
949 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ 950 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
950 951
951 /* Maybe handle a signal */ 952 /* Maybe handle a signal */
9525: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 9535: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
953 lwi r11, r11, TS_THREAD_INFO; /* get thread info */
954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */ 954 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
955 andi r11, r11, _TIF_SIGPENDING; 955 andi r11, r11, _TIF_SIGPENDING;
956 beqi r11, 1f; /* Signals to handle, handle them */ 956 beqi r11, 1f; /* Signals to handle, handle them */
@@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
966 (in a possibly modified form) after do_signal returns. */ 966 (in a possibly modified form) after do_signal returns. */
967 967
968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ 968 la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
969 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
970 addi r7, r0, 0; /* Arg 3: int in_syscall */ 969 addi r7, r0, 0; /* Arg 3: int in_syscall */
971 bralid r15, do_signal; /* Handle any signals */ 970 bralid r15, do_signal; /* Handle any signals */
972 nop; 971 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
973 972
974 973
975/* Finally, return to user state. */ 974/* Finally, return to user state. */
9761: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ 9751: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
977 add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ 976 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
978 swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
979 VM_OFF; 977 VM_OFF;
980 tophys(r1,r1); 978 tophys(r1,r1);
981 979
@@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
1007 1005
1008ENTRY(_switch_to) 1006ENTRY(_switch_to)
1009 /* prepare return value */ 1007 /* prepare return value */
1010 addk r3, r0, r31 1008 addk r3, r0, CURRENT_TASK
1011 1009
1012 /* save registers in cpu_context */ 1010 /* save registers in cpu_context */
1013 /* use r11 and r12, volatile registers, as temp register */ 1011 /* use r11 and r12, volatile registers, as temp register */
@@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
1051 nop 1049 nop
1052 swi r12, r11, CC_FSR 1050 swi r12, r11, CC_FSR
1053 1051
1054 /* update r31, the current */ 1052 /* update r31, the current-give me pointer to task which will be next */
1055 lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ 1053 lwi CURRENT_TASK, r6, TI_TASK
1056 /* stored it to current_save too */ 1054 /* stored it to current_save too */
1057 swi r31, r0, PER_CPU(CURRENT_SAVE) 1055 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1058 1056
1059 /* get new process' cpu context and restore */ 1057 /* get new process' cpu context and restore */
1060 /* give me start where start context of next task */ 1058 /* give me start where start context of next task */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc7..cb7815cfe5ab 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,8 +99,8 @@ no_fdt_arg:
99 tophys(r4,r4) /* convert to phys address */ 99 tophys(r4,r4) /* convert to phys address */
100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ 100 ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
101_copy_command_line: 101_copy_command_line:
102 lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ 102 lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
103 sb r7, r4, r6 /* addr[r4+r6]= r7*/ 103 sb r2, r4, r6 /* addr[r4+r6]= r7*/
104 addik r6, r6, 1 /* increment counting */ 104 addik r6, r6, 1 /* increment counting */
105 bgtid r3, _copy_command_line /* loop for all entries */ 105 bgtid r3, _copy_command_line /* loop for all entries */
106 addik r3, r3, -1 /* descrement loop */ 106 addik r3, r3, -1 /* descrement loop */
@@ -136,6 +136,11 @@ _invalidate:
136 addik r3, r3, -1 136 addik r3, r3, -1
137 /* sync */ 137 /* sync */
138 138
139 /* Setup the kernel PID */
140 mts rpid,r0 /* Load the kernel PID */
141 nop
142 bri 4
143
139 /* 144 /*
140 * We should still be executing code at physical address area 145 * We should still be executing code at physical address area
141 * RAM_BASEADDR at this point. However, kernel code is at 146 * RAM_BASEADDR at this point. However, kernel code is at
@@ -146,10 +151,6 @@ _invalidate:
146 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ 151 addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
147 tophys(r4,r3) /* Load the kernel physical address */ 152 tophys(r4,r3) /* Load the kernel physical address */
148 153
149 mts rpid,r0 /* Load the kernel PID */
150 nop
151 bri 4
152
153 /* 154 /*
154 * Configure and load two entries into TLB slots 0 and 1. 155 * Configure and load two entries into TLB slots 0 and 1.
155 * In case we are pinning TLBs, these are reserved in by the 156 * In case we are pinning TLBs, these are reserved in by the
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 0f06034d1fe0..6f39e2c001f3 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -93,3 +93,18 @@ skip:
93 } 93 }
94 return 0; 94 return 0;
95} 95}
96
/* MS: There is no advanced mapping mechanism here.  We use a simple 32-bit
   interrupt controller without cascades or other interconnect, so hardware
   interrupt numbers map 1:1 onto Linux virtual interrupt numbers. */
unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
{
	return hwirq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
104
/* Translate a device-tree interrupt specifier to a virtual IRQ number.
 * With the 1:1 mapping above, the first specifier cell is the IRQ itself;
 * @controller and @intsize are intentionally unused. */
unsigned int irq_create_of_mapping(struct device_node *controller,
			u32 *intspec, unsigned int intsize)
{
	return intspec[0];
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb8c4b9ccb80..f974ec7aa357 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -22,7 +22,10 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/bug.h> 23#include <linux/bug.h>
24#include <linux/param.h> 24#include <linux/param.h>
25#include <linux/pci.h>
25#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/of_platform.h>
28#include <linux/dma-mapping.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
27#include <asm/entry.h> 30#include <asm/entry.h>
28#include <asm/cpuinfo.h> 31#include <asm/cpuinfo.h>
@@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
54 57
55 microblaze_cache_init(); 58 microblaze_cache_init();
56 59
57 invalidate_dcache();
58 enable_dcache();
59
60 invalidate_icache();
61 enable_icache();
62
63 setup_memory(); 60 setup_memory();
64 61
62 xilinx_pci_init();
63
65#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) 64#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
66 printk(KERN_NOTICE "Self modified code enable\n"); 65 printk(KERN_NOTICE "Self modified code enable\n");
67#endif 66#endif
@@ -188,3 +187,37 @@ static int microblaze_debugfs_init(void)
188} 187}
189arch_initcall(microblaze_debugfs_init); 188arch_initcall(microblaze_debugfs_init);
190#endif 189#endif
190
191static int dflt_bus_notify(struct notifier_block *nb,
192 unsigned long action, void *data)
193{
194 struct device *dev = data;
195
196 /* We are only intereted in device addition */
197 if (action != BUS_NOTIFY_ADD_DEVICE)
198 return 0;
199
200 set_dma_ops(dev, &dma_direct_ops);
201
202 return NOTIFY_DONE;
203}
204
/* Highest priority: install default DMA ops before other notifiers run. */
static struct notifier_block dflt_plat_bus_notifier = {
	.notifier_call = dflt_bus_notify,
	.priority = INT_MAX,
};

/* Same callback for devices probed via the OF (device-tree) bus. */
static struct notifier_block dflt_of_bus_notifier = {
	.notifier_call = dflt_bus_notify,
	.priority = INT_MAX,
};
214
/* Hook the default-DMA-ops notifier into both bus types we probe from. */
static int __init setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
	bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);

	return 0;
}

arch_initcall(setup_bus_notifier);
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e26..09c49ed87235 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
2# Makefile 2# Makefile
3# 3#
4 4
5obj-y := init.o 5obj-y := consistent.o init.o
6 6
7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o 7obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 000000000000..a9b443e3fb98
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,246 @@
1/*
2 * Microblaze support for cache consistent memory.
3 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
4 * Copyright (C) 2010 PetaLogix
5 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
6 *
7 * Based on PowerPC version derived from arch/arm/mm/consistent.c
8 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
9 * Copyright (C) 2000 Russell King
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/module.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/ptrace.h>
24#include <linux/mman.h>
25#include <linux/mm.h>
26#include <linux/swap.h>
27#include <linux/stddef.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35
36#include <asm/pgalloc.h>
37#include <linux/io.h>
38#include <linux/hardirq.h>
39#include <asm/mmu_context.h>
40#include <asm/mmu.h>
41#include <linux/uaccess.h>
42#include <asm/pgtable.h>
43#include <asm/cpuinfo.h>
44
45#ifndef CONFIG_MMU
46
47/* I have to use dcache values because I can't relate on ram size */
48#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
49
50/*
51 * Consistent memory allocators. Used for DMA devices that want to
52 * share uncached memory with the processor core.
53 * My crufty no-MMU approach is simple. In the HW platform we can optionally
54 * mirror the DDR up above the processor cacheable region. So, memory accessed
55 * in this mirror region will not be cached. It's alloced from the same
56 * pool as normal memory, but the handle we return is shifted up into the
57 * uncached region. This will no doubt cause big problems if memory allocated
58 * here is not also freed properly. -- JW
59 */
60void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
61{
62 struct page *page, *end, *free;
63 unsigned long order;
64 void *ret, *virt;
65
66 if (in_interrupt())
67 BUG();
68
69 size = PAGE_ALIGN(size);
70 order = get_order(size);
71
72 page = alloc_pages(gfp, order);
73 if (!page)
74 goto no_page;
75
76 /* We could do with a page_to_phys and page_to_bus here. */
77 virt = page_address(page);
78 ret = ioremap(virt_to_phys(virt), size);
79 if (!ret)
80 goto no_remap;
81
82 /*
83 * Here's the magic! Note if the uncached shadow is not implemented,
84 * it's up to the calling code to also test that condition and make
85 * other arranegments, such as manually flushing the cache and so on.
86 */
87#ifdef CONFIG_XILINX_UNCACHED_SHADOW
88 ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
89#endif
90 /* dma_handle is same as physical (shadowed) address */
91 *dma_handle = (dma_addr_t)ret;
92
93 /*
94 * free wasted pages. We skip the first page since we know
95 * that it will have count = 1 and won't require freeing.
96 * We also mark the pages in use as reserved so that
97 * remap_page_range works.
98 */
99 page = virt_to_page(virt);
100 free = page + (size >> PAGE_SHIFT);
101 end = page + (1 << order);
102
103 for (; page < end; page++) {
104 init_page_count(page);
105 if (page >= free)
106 __free_page(page);
107 else
108 SetPageReserved(page);
109 }
110
111 return ret;
112no_remap:
113 __free_pages(page, order);
114no_page:
115 return NULL;
116}
117
118#else
119
/*
 * Allocate @size bytes of cache-coherent memory (MMU flavour).
 *
 * Pages come from the normal allocator and are remapped into vmalloc
 * space with caching inhibited (_PAGE_NO_CACHE), which is what makes the
 * region coherent.  @dma_handle receives the bus address of the first
 * page.  Returns the uncached kernel virtual address, or NULL on failure.
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	int order, err, i;
	unsigned long page, va, flags;
	phys_addr_t pa;
	struct vm_struct *area;
	void *ret;

	/* may sleep - never valid in interrupt context */
	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		/* NOTE(review): BUG() on allocation failure makes the
		 * return below unreachable; returning NULL alone would be
		 * the conventional OOM behaviour - confirm intent. */
		BUG();
		return NULL;
	}

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 * (page holds a kernel virtual address despite the integer type)
	 */
	flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);

	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (area == NULL) {
		free_pages(page, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)page);

	/* MS: This is the whole magic - use cache inhibit pages */
	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;

	/*
	 * Set refcount=1 on all pages in an order>0
	 * allocation so that vfree() will actually
	 * free all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);
		for (i = 1; i < (1 << order); i++)
			init_page_count(rpage+i);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va+i, pa+i, flags);

	if (err) {
		/* NOTE(review): this tears down the vm area, but pages that
		 * were never mapped into it may not be reclaimed by vfree()
		 * - check this path for a potential page leak. */
		vfree((void *)va);
		return NULL;
	}

	return ret;
}
184#endif /* CONFIG_MMU */
185EXPORT_SYMBOL(consistent_alloc);
186
/*
 * Free page(s) previously returned by consistent_alloc() (either flavour).
 */
void consistent_free(void *vaddr)
{
	/* may sleep - never valid in interrupt context */
	if (in_interrupt())
		BUG();

	/* Clear SHADOW_MASK bit in address, and free as per usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
	/* NOTE(review): on no-MMU the buffer came from alloc_pages() +
	 * ioremap(), not vmalloc(); confirm vfree() is the right release
	 * path in that configuration. */
	vfree(vaddr);
}
EXPORT_SYMBOL(consistent_free);
201EXPORT_SYMBOL(consistent_free);
202
203/*
204 * make an area consistent.
205 */
206void consistent_sync(void *vaddr, size_t size, int direction)
207{
208 unsigned long start;
209 unsigned long end;
210
211 start = (unsigned long)vaddr;
212
213 /* Convert start address back down to unshadowed memory region */
214#ifdef CONFIG_XILINX_UNCACHED_SHADOW
215 start &= ~UNCACHED_SHADOW_MASK;
216#endif
217 end = start + size;
218
219 switch (direction) {
220 case PCI_DMA_NONE:
221 BUG();
222 case PCI_DMA_FROMDEVICE: /* invalidate only */
223 flush_dcache_range(start, end);
224 break;
225 case PCI_DMA_TODEVICE: /* writeback only */
226 flush_dcache_range(start, end);
227 break;
228 case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
229 flush_dcache_range(start, end);
230 break;
231 }
232}
233EXPORT_SYMBOL(consistent_sync);
234
235/*
236 * consistent_sync_page makes memory consistent. identical
237 * to consistent_sync, but takes a struct page instead of a
238 * virtual address
239 */
240void consistent_sync_page(struct page *page, unsigned long offset,
241 size_t size, int direction)
242{
243 unsigned long start = (unsigned long)page_address(page) + offset;
244 consistent_sync((void *)start, size, direction);
245}
246EXPORT_SYMBOL(consistent_sync_page);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a57cedf36715..1608e2e1a44a 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,6 +23,9 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/tlb.h> 24#include <asm/tlb.h>
25 25
26/* Use for MMU and noMMU because of PCI generic code */
27int mem_init_done;
28
26#ifndef CONFIG_MMU 29#ifndef CONFIG_MMU
27unsigned int __page_offset; 30unsigned int __page_offset;
28EXPORT_SYMBOL(__page_offset); 31EXPORT_SYMBOL(__page_offset);
@@ -30,7 +33,6 @@ EXPORT_SYMBOL(__page_offset);
30#else 33#else
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32 35
33int mem_init_done;
34static int init_bootmem_done; 36static int init_bootmem_done;
35#endif /* CONFIG_MMU */ 37#endif /* CONFIG_MMU */
36 38
@@ -193,12 +195,6 @@ void free_initmem(void)
193 (unsigned long)(&__init_end)); 195 (unsigned long)(&__init_end));
194} 196}
195 197
196/* FIXME from arch/powerpc/mm/mem.c*/
197void show_mem(void)
198{
199 printk(KERN_NOTICE "%s\n", __func__);
200}
201
202void __init mem_init(void) 198void __init mem_init(void)
203{ 199{
204 high_memory = (void *)__va(memory_end); 200 high_memory = (void *)__va(memory_end);
@@ -208,9 +204,7 @@ void __init mem_init(void)
208 printk(KERN_INFO "Memory: %luk/%luk available\n", 204 printk(KERN_INFO "Memory: %luk/%luk available\n",
209 nr_free_pages() << (PAGE_SHIFT-10), 205 nr_free_pages() << (PAGE_SHIFT-10),
210 num_physpages << (PAGE_SHIFT-10)); 206 num_physpages << (PAGE_SHIFT-10));
211#ifdef CONFIG_MMU
212 mem_init_done = 1; 207 mem_init_done = 1;
213#endif
214} 208}
215 209
216#ifndef CONFIG_MMU 210#ifndef CONFIG_MMU
@@ -222,6 +216,10 @@ int ___range_ok(unsigned long addr, unsigned long size)
222} 216}
223EXPORT_SYMBOL(___range_ok); 217EXPORT_SYMBOL(___range_ok);
224 218
219int page_is_ram(unsigned long pfn)
220{
221 return __range_ok(pfn, 0);
222}
225#else 223#else
226int page_is_ram(unsigned long pfn) 224int page_is_ram(unsigned long pfn)
227{ 225{
@@ -349,4 +347,27 @@ void __init *early_get_page(void)
349 } 347 }
350 return p; 348 return p;
351} 349}
350
352#endif /* CONFIG_MMU */ 351#endif /* CONFIG_MMU */
352
353void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
354{
355 if (mem_init_done)
356 return kmalloc(size, mask);
357 else
358 return alloc_bootmem(size);
359}
360
361void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
362{
363 void *p;
364
365 if (mem_init_done)
366 p = kzalloc(size, mask);
367 else {
368 p = alloc_bootmem(size);
369 if (p)
370 memset(p, 0, size);
371 }
372 return p;
373}
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 2820081b21ab..63a6fd07c48f 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
103 area = get_vm_area(size, VM_IOREMAP); 103 area = get_vm_area(size, VM_IOREMAP);
104 if (area == NULL) 104 if (area == NULL)
105 return NULL; 105 return NULL;
106 v = VMALLOC_VMADDR(area->addr); 106 v = (unsigned long) area->addr;
107 } else { 107 } else {
108 v = (ioremap_bot -= size); 108 v = (ioremap_bot -= size);
109 } 109 }
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
new file mode 100644
index 000000000000..9889cc2e1294
--- /dev/null
+++ b/arch/microblaze/pci/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile
3#
4
5obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
6obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c
new file mode 100644
index 000000000000..25f18f017f21
--- /dev/null
+++ b/arch/microblaze/pci/indirect_pci.c
@@ -0,0 +1,163 @@
1/*
2 * Support for indirect PCI bridges.
3 *
4 * Copyright (C) 1998 Gabriel Paubert.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19#include <asm/prom.h>
20#include <asm/pci-bridge.h>
21
/*
 * indirect_read_config - read PCI config space through an indirect
 * (address register / data register) bridge.
 *
 * Writes the config address (enable bit, bus, devfn, register) to the
 * bridge's address port, then reads the value from the data port.
 * Returns a PCIBIOS_* status code.
 */
static int
indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		     int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;
	u32 bus_no, reg;

	/* With no PCIe link, only dev 0 on the root bus is reachable */
	if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
		if (bus->number != hose->first_busno)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Type-1 cycles for anything behind the root bus */
	if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	bus_no = (bus->number == hose->first_busno) ?
			hose->self_busno : bus->number;

	if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
	else
		reg = offset & 0xfc;	/* dword-aligned register number */

	if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
		out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));
	else
		out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3); /* byte lane within dword */
	switch (len) {
	case 1:
		*val = in_8(cfg_data);
		break;
	case 2:
		*val = in_le16(cfg_data);
		break;
	default:
		*val = in_le32(cfg_data);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
75
/*
 * indirect_write_config - write PCI config space through an indirect
 * bridge; mirror of indirect_read_config() plus two quirk filters
 * applied to the value before it is written.
 */
static int
indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		      int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;
	u32 bus_no, reg;

	/* With no PCIe link, only dev 0 on the root bus is reachable */
	if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
		if (bus->number != hose->first_busno)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	bus_no = (bus->number == hose->first_busno) ?
			hose->self_busno : bus->number;

	if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
	else
		reg = offset & 0xfc;	/* dword-aligned register number */

	if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
		out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));
	else
		out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));

	/* suppress setting of PCI_PRIMARY_BUS on the root bus */
	if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
		if ((offset == PCI_PRIMARY_BUS) &&
			(bus->number == hose->first_busno))
			val &= 0xffffff00;

	/* Workaround for PCI_28 Errata in 440EPx/GRx */
	if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
			offset == PCI_CACHE_LINE_SIZE) {
		val = 0;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3); /* byte lane within dword */
	switch (len) {
	case 1:
		out_8(cfg_data, val);
		break;
	case 2:
		out_le16(cfg_data, val);
		break;
	default:
		out_le32(cfg_data, val);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
142
/* Config-space accessors installed by setup_indirect_pci() below. */
static struct pci_ops indirect_pci_ops = {
	.read = indirect_read_config,
	.write = indirect_write_config,
};
147
/*
 * setup_indirect_pci - wire up a host controller for indirect config
 * access.
 *
 * Maps the page(s) containing the bridge's config address and config
 * data registers (sharing one mapping when both live in the same page)
 * and installs indirect_pci_ops.
 *
 * NOTE(review): ioremap() return values are not checked here — this
 * runs at early init; confirm failure is impossible/fatal anyway.
 */
void __init
setup_indirect_pci(struct pci_controller *hose,
		   resource_size_t cfg_addr,
		   resource_size_t cfg_data, u32 flags)
{
	resource_size_t base = cfg_addr & PAGE_MASK;
	void __iomem *mbase;

	mbase = ioremap(base, PAGE_SIZE);
	hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
	/* Data register in a different page? Map it separately. */
	if ((cfg_data & PAGE_MASK) != base)
		mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
	hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
	hose->ops = &indirect_pci_ops;
	hose->indirect_type = flags;
}
diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c
new file mode 100644
index 000000000000..3fbf16f4e16c
--- /dev/null
+++ b/arch/microblaze/pci/iomap.c
@@ -0,0 +1,39 @@
1/*
2 * ppc64 "iomap" interface implementation.
3 *
4 * (C) Copyright 2004 Linus Torvalds
5 */
6#include <linux/init.h>
7#include <linux/pci.h>
8#include <linux/mm.h>
9#include <asm/io.h>
10#include <asm/pci-bridge.h>
11
12void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
13{
14 resource_size_t start = pci_resource_start(dev, bar);
15 resource_size_t len = pci_resource_len(dev, bar);
16 unsigned long flags = pci_resource_flags(dev, bar);
17
18 if (!len)
19 return NULL;
20 if (max && len > max)
21 len = max;
22 if (flags & IORESOURCE_IO)
23 return ioport_map(start, len);
24 if (flags & IORESOURCE_MEM)
25 return ioremap(start, len);
26 /* What? */
27 return NULL;
28}
29EXPORT_SYMBOL(pci_iomap);
30
31void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
32{
33 if (isa_vaddr_is_ioport(addr))
34 return;
35 if (pcibios_vaddr_is_ioport(addr))
36 return;
37 iounmap(addr);
38}
39EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
new file mode 100644
index 000000000000..0be34350d733
--- /dev/null
+++ b/arch/microblaze/pci/pci-common.c
@@ -0,0 +1,1642 @@
1/*
2 * Contains common pci routines for ALL ppc platform
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29
30#include <asm/processor.h>
31#include <asm/io.h>
32#include <asm/prom.h>
33#include <asm/pci-bridge.h>
34#include <asm/byteorder.h>
35
36static DEFINE_SPINLOCK(hose_spinlock);
37LIST_HEAD(hose_list);
38
39/* XXX kill that some day ... */
40static int global_phb_number; /* Global phb counter */
41
42/* ISA Memory physical address */
43resource_size_t isa_mem_base;
44
45/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
46unsigned int pci_flags;
47
48static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
49
/* Override the DMA ops used for all PCI devices (default: dma_direct_ops). */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
54
/* Return the DMA ops currently in effect for PCI devices. */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
60
/* Set the streaming DMA mask for a PCI device; thin generic-DMA wrapper. */
int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_mask(&dev->dev, mask);
}
65
/*
 * Set the coherent DMA mask: on this platform it simply tracks the
 * streaming mask set via dma_set_mask().
 *
 * NOTE(review): coherent_dma_mask is copied from dma_mask even when
 * dma_set_mask() fails — confirm that is the intended semantic.
 */
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	int rc;

	rc = dma_set_mask(&dev->dev, mask);
	dev->dev.coherent_dma_mask = dev->dma_mask;

	return rc;
}
75
/*
 * pcibios_alloc_controller - allocate and register a PHB (host bridge).
 *
 * Works both before and after mem_init() via zalloc_maybe_bootmem().
 * is_dynamic records whether the allocation came from the slab so
 * pcibios_free_controller() knows whether kfree() is legal.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	/* hose_spinlock guards both the counter and the global list */
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
	return phb;
}
91
/*
 * pcibios_free_controller - unregister a PHB and free it if it was
 * slab-allocated (bootmem allocations are never returned).
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
101
102static resource_size_t pcibios_io_size(const struct pci_controller *hose)
103{
104 return hose->io_resource.end - hose->io_resource.start + 1;
105}
106
/*
 * pcibios_vaddr_is_ioport - return 1 if @address falls inside the
 * virtual I/O window of any registered host controller, 0 otherwise.
 */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}
125
/*
 * pci_address_to_pio - translate a CPU physical address into a port
 * number (offset from _IO_BASE) by searching the registered hoses'
 * physical I/O windows. Returns ~0 when the address is in no window.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
148
149/*
150 * Return the domain number for this bus.
151 */
152int pci_domain_nr(struct pci_bus *bus)
153{
154 struct pci_controller *hose = pci_bus_to_host(bus);
155
156 return hose->global_number;
157}
158EXPORT_SYMBOL(pci_domain_nr);
159
160/* This routine is meant to be used early during boot, when the
161 * PCI bus numbers have not yet been assigned, and you need to
162 * issue PCI config cycles to an OF device.
163 * It could also be used to "fix" RTAS config cycles if you want
164 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
165 * config cycles.
166 */
/*
 * pci_find_hose_for_OF_device - walk up from @node through its parents
 * until a device-tree node owned by a registered host controller is
 * found; returns that controller, or NULL if none matches.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
178
/*
 * sysfs "devspec" show routine: prints the full device-tree path of the
 * PCI device, or nothing if it has no OF node.
 */
static ssize_t pci_show_devspec(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
192
/* Add arch-specific sysfs attributes (the "devspec" OF path) to a device */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}
198
/*
 * Kernel command-line "pci=" option hook: nothing is consumed here, so
 * the whole string is handed back to the generic code.
 */
char __devinit *pcibios_setup(char *str)
{
	return str;
}
203
204/*
205 * Reads the interrupt pin to determine if interrupt is use by card.
206 * If the interrupt is used, then gets the interrupt line from the
207 * openfirmware and sets it in the pci_dev and pci_config line.
208 */
/*
 * pci_read_irq_line - resolve a PCI device's interrupt.
 *
 * First tries the device-tree interrupt map; if that fails, falls back
 * to the PCI_INTERRUPT_PIN/LINE config registers and maps the line via
 * the default controller as a level-low interrupt. On success stores
 * the Linux virq in pci_dev->irq and returns 0; returns -1 on failure.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	/* The current device-tree that iSeries generates from the HV
	 * PCI informations doesn't contain proper interrupt routing,
	 * and all the fallback would do is print out crap, so we
	 * don't attempt to resolve the interrupts here at all, some
	 * iSeries specific fixup does it.
	 *
	 * In the long run, we will hopefully fix the generated device-tree
	 * instead.
	 */
	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)	/* device uses no interrupt pin */
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
274
275/*
276 * Platform support for /proc/bus/pci/X/Y mmap()s,
277 * modelled on the sparc64 implementation by Dave Miller.
278 * -- paulus.
279 */
280
281/*
282 * Adjust vm_pgoff of VMA such that it is the physical page offset
283 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
284 *
285 * Basically, the user finds the base address for his device which he wishes
286 * to mmap. They read the 32-bit value from the config space base register,
287 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
288 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
289 *
290 * Returns negative error code on failure, zero on success.
291 */
/*
 * __pci_mmap_make_offset - validate a user mmap offset against @dev's
 * BARs and rewrite *offset to the final physical address.
 *
 * Returns the matching resource, or NULL if the offset hits none of
 * the device's resources of the requested type (IO vs MEM).
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		/* I/O offsets are relative to the PHB's virtual window */
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
343
344/*
345 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
346 * device mapping.
347 */
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 *
 * NOTE(review): write_combine is computed below but never used — the
 * function unconditionally returns a non-cached protection. Presumably
 * write-combining is unsupported here; confirm before relying on it.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	pgprot_t prot = protection;

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	return pgprot_noncached(prot);
}
370
371/*
372 * This one is used by /dev/mem and fbdev who have no clue about the
373 * PCI device, it tries to find the PCI device first and calls the
374 * above routine
375 */
/*
 * pci_phys_mem_access_prot - pick a page protection for a physical
 * mapping requested without a PCI device context (/dev/mem, fbdev).
 *
 * RAM keeps the caller's protection; anything else becomes non-cached,
 * upgraded to write-combine if it lands in a prefetchable PCI BAR.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search every PCI device's BARs for the target address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* drop the reference for_each_pci_dev() took */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
419
420/*
421 * Perform the actual remap of the pages for a PCI device mapping, as
422 * appropriate for this architecture. The region in the process to map
423 * is described by vm_start and vm_end members of VMA, the base physical
424 * address is found in vm_pgoff.
425 * The pci device structure is provided so that architectures may make mapping
426 * decisions on a per-device or per-bus basis.
427 *
428 * Returns a negative error code on failure, zero on success.
429 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	/* Validate offset against the device's BARs and fix it up */
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
452
/*
 * pci_legacy_read - legacy I/O port read on a bus (sysfs legacy_io).
 * Returns the number of bytes read, or a negative errno on bad port,
 * bad alignment, or out-of-window access.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)	/* require natural alignment */
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
492
/*
 * pci_legacy_write - legacy I/O port write on a bus (sysfs legacy_io).
 * Returns the number of bytes written, or a negative errno.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)	/* require natural alignment */
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
537
538/* This provides legacy IO or memory mmap access on a bus */
539int pci_mmap_legacy_page_range(struct pci_bus *bus,
540 struct vm_area_struct *vma,
541 enum pci_mmap_state mmap_state)
542{
543 struct pci_controller *hose = pci_bus_to_host(bus);
544 resource_size_t offset =
545 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
546 resource_size_t size = vma->vm_end - vma->vm_start;
547 struct resource *rp;
548
549 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
550 pci_domain_nr(bus), bus->number,
551 mmap_state == pci_mmap_mem ? "MEM" : "IO",
552 (unsigned long long)offset,
553 (unsigned long long)(offset + size - 1));
554
555 if (mmap_state == pci_mmap_mem) {
556 /* Hack alert !
557 *
558 * Because X is lame and can fail starting if it gets an error
559 * trying to mmap legacy_mem (instead of just moving on without
560 * legacy memory access) we fake it here by giving it anonymous
561 * memory, effectively behaving just like /dev/zero
562 */
563 if ((offset + size) > hose->isa_mem_size) {
564#ifdef CONFIG_MMU
565 printk(KERN_DEBUG
566 "Process %s (pid:%d) mapped non-existing PCI"
567 "legacy memory for 0%04x:%02x\n",
568 current->comm, current->pid, pci_domain_nr(bus),
569 bus->number);
570#endif
571 if (vma->vm_flags & VM_SHARED)
572 return shmem_zero_setup(vma);
573 return 0;
574 }
575 offset += hose->isa_mem_phys;
576 } else {
577 unsigned long io_offset = (unsigned long)hose->io_base_virt - \
578 _IO_BASE;
579 unsigned long roffset = offset + io_offset;
580 rp = &hose->io_resource;
581 if (!(rp->flags & IORESOURCE_IO))
582 return -ENXIO;
583 if (roffset < rp->start || (roffset + size) > rp->end)
584 return -ENXIO;
585 offset += hose->io_base_phys;
586 }
587 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
588
589 vma->vm_pgoff = offset >> PAGE_SHIFT;
590 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
591 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
592 vma->vm_end - vma->vm_start,
593 vma->vm_page_prot);
594}
595
/*
 * pci_resource_to_user - convert a device resource to the address range
 * exposed to userspace (sysfs "resource" files): I/O resources are
 * rebased to port numbers, memory resources are passed through as full
 * physical addresses (see BenH's note below).
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
634
635/**
636 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
637 * @hose: newly allocated pci_controller to be setup
638 * @dev: device node of the host bridge
639 * @primary: set if primary bus (32 bits only, soon to be deprecated)
640 *
641 * This function will parse the "ranges" property of a PCI host bridge device
642 * node and setup the resource mapping of a pci controller based on its
643 * content.
644 *
645 * Life would be boring if it wasn't for a few issues that we have to deal
646 * with here:
647 *
648 * - We can only cope with one IO space range and up to 3 Memory space
649 * ranges. However, some machines (thanks Apple !) tend to split their
650 * space into lots of small contiguous ranges. So we have to coalesce.
651 *
652 * - We can only cope with all memory ranges having the same offset
653 * between CPU addresses and PCI addresses. Unfortunately, some bridges
654 * are setup for a large 1:1 mapping along with a small "window" which
655 * maps PCI address 0 to some arbitrary high address of the CPU space in
656 * order to give access to the ISA memory hole.
657 * The way out of here that I've chosen for now is to always set the
658 * offset based on the first resource found, then override it if we
659 * have a different offset and the previous was set by an ISA hole.
660 *
661 * - Some busses have IO space not starting at 0, which causes trouble with
662 * the way we do our IO resource renumbering. The code somewhat deals with
663 * it for 64 bits but I would expect problems on 32 bits.
664 *
665 * - Some 32 bits platforms such as 4xx can have physical space larger than
666 * 32 bits so we need to use 64 bits values for the parsing
667 */
/*
 * pci_process_bridge_OF_ranges - parse a host bridge's "ranges" property
 * @hose:    controller being set up
 * @dev:     device-tree node of the host bridge
 * @primary: non-zero if this is the platform's primary bridge (it then
 *           provides the global isa_io_base / isa_mem_base)
 *
 * Each "ranges" entry is laid out as:
 *   (pci_space: 1 cell, pci_addr: 2 cells, cpu_addr: pna cells, size: 2 cells)
 * where pna is the parent's #address-cells.  Entries are decoded, adjacent
 * entries of the same space are coalesced, and the result is turned into the
 * controller's IO resource, up to 3 memory resources and the IO/memory
 * offsets.  Returns silently if the property is absent.
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev,
					    int primary)
{
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);	/* parent #address-cells */
	int np = pna + 5;		/* cells per ranges entry */
	int memno = 0, isa_hole = -1;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	unsigned long long isa_mb = 0;	/* CPU addr of the ISA memory hole */
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	pr_debug("Parsing ranges property...\n");
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = ranges[0];
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);

		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
			 "cpu_addr:0x%016llx size:0x%016llx\n",
			 pci_space, pci_addr, cpu_addr, size);

		ranges += np;

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt
		 * at exposing the VGA memory hole)
		 */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous
		 * on both the PCI and CPU side, folding them into one
		 * larger range.
		 */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (ranges[0] != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type (bits 24-25 of the
		 * first cell: 1 = IO, 2 = 32-bit MEM, 3 = 64-bit MEM)
		 */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1: /* PCI IO space */
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2: /* PCI Memory space */
		case 3: /* PCI 64 bits Memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here (a range with
			 * PCI address 0 is treated as the ISA hole)
			 */
			if (pci_addr == 0) {
				isa_mb = cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the, current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = cpu_addr - pci_addr;
			else if (pci_addr != 0 &&
				 hose->pci_mem_offset != cpu_addr - pci_addr) {
				printk(KERN_INFO
				       " \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		/* Common tail: name the resource and detach it from any
		 * tree it might have belonged to.
		 */
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
831
832/* Decide whether to display the domain number in /proc */
833int pci_proc_domain(struct pci_bus *bus)
834{
835 struct pci_controller *hose = pci_bus_to_host(bus);
836
837 if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS))
838 return 0;
839 if (pci_flags & PCI_COMPAT_DOMAIN_0)
840 return hose->global_number != 0;
841 return 1;
842}
843
844void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
845 struct resource *res)
846{
847 resource_size_t offset = 0, mask = (resource_size_t)-1;
848 struct pci_controller *hose = pci_bus_to_host(dev->bus);
849
850 if (!hose)
851 return;
852 if (res->flags & IORESOURCE_IO) {
853 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
854 mask = 0xffffffffu;
855 } else if (res->flags & IORESOURCE_MEM)
856 offset = hose->pci_mem_offset;
857
858 region->start = (res->start - offset) & mask;
859 region->end = (res->end - offset) & mask;
860}
861EXPORT_SYMBOL(pcibios_resource_to_bus);
862
863void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
864 struct pci_bus_region *region)
865{
866 resource_size_t offset = 0, mask = (resource_size_t)-1;
867 struct pci_controller *hose = pci_bus_to_host(dev->bus);
868
869 if (!hose)
870 return;
871 if (res->flags & IORESOURCE_IO) {
872 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
873 mask = 0xffffffffu;
874 } else if (res->flags & IORESOURCE_MEM)
875 offset = hose->pci_mem_offset;
876 res->start = (region->start + offset) & mask;
877 res->end = (region->end + offset) & mask;
878}
879EXPORT_SYMBOL(pcibios_bus_to_resource);
880
881/* Fixup a bus resource into a linux resource */
882static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
883{
884 struct pci_controller *hose = pci_bus_to_host(dev->bus);
885 resource_size_t offset = 0, mask = (resource_size_t)-1;
886
887 if (res->flags & IORESOURCE_IO) {
888 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
889 mask = 0xffffffffu;
890 } else if (res->flags & IORESOURCE_MEM)
891 offset = hose->pci_mem_offset;
892
893 res->start = (res->start + offset) & mask;
894 res->end = (res->end + offset) & mask;
895}
896
897/* This header fixup will do the resource fixup for all devices as they are
898 * probed, but not for bridge ranges
899 */
900static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
901{
902 struct pci_controller *hose = pci_bus_to_host(dev->bus);
903 int i;
904
905 if (!hose) {
906 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
907 pci_name(dev));
908 return;
909 }
910 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
911 struct resource *res = dev->resource + i;
912 if (!res->flags)
913 continue;
914 /* On platforms that have PCI_PROBE_ONLY set, we don't
915 * consider 0 as an unassigned BAR value. It's technically
916 * a valid value, but linux doesn't like it... so when we can
917 * re-assign things, we do so, but if we can't, we keep it
918 * around and hope for the best...
919 */
920 if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) {
921 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
922 "is unassigned\n",
923 pci_name(dev), i,
924 (unsigned long long)res->start,
925 (unsigned long long)res->end,
926 (unsigned int)res->flags);
927 res->end -= res->start;
928 res->start = 0;
929 res->flags |= IORESOURCE_UNSET;
930 continue;
931 }
932
933 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
934 pci_name(dev), i,
935 (unsigned long long)res->start,\
936 (unsigned long long)res->end,
937 (unsigned int)res->flags);
938
939 fixup_resource(res, dev);
940
941 pr_debug("PCI:%s %016llx-%016llx\n",
942 pci_name(dev),
943 (unsigned long long)res->start,
944 (unsigned long long)res->end);
945 }
946}
947DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
948
/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 *
 * Returns 1 if the resource looks uninitialized (caller clears it for
 * later re-assignment), 0 if it looks valid.  No-op (returns 0) under
 * PCI_PROBE_ONLY since nothing may be re-assigned then anyway.
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
							   struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;	/* the bridge itself */
	resource_size_t offset;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_flags & PCI_PROBE_ONLY)
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* If the BAR is non-0 (res != pci_mem_offset) then it's
		 * probably been initialized by somebody
		 */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of
		 * the bridge resources covers that starting address (0) then
		 * it's good enough for us for memory
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			   hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead if that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}
1018
1019/* Fixup resources of a PCI<->PCI bridge */
1020static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1021{
1022 struct resource *res;
1023 int i;
1024
1025 struct pci_dev *dev = bus->self;
1026
1027 for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
1028 res = bus->resource[i];
1029 if (!res)
1030 continue;
1031 if (!res->flags)
1032 continue;
1033 if (i >= 3 && bus->self->transparent)
1034 continue;
1035
1036 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
1037 pci_name(dev), i,
1038 (unsigned long long)res->start,\
1039 (unsigned long long)res->end,
1040 (unsigned int)res->flags);
1041
1042 /* Perform fixup */
1043 fixup_resource(res, dev);
1044
1045 /* Try to detect uninitialized P2P bridge resources,
1046 * and clear them out so they get re-assigned later
1047 */
1048 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1049 res->flags = 0;
1050 pr_debug("PCI:%s (unassigned)\n",
1051 pci_name(dev));
1052 } else {
1053 pr_debug("PCI:%s %016llx-%016llx\n",
1054 pci_name(dev),
1055 (unsigned long long)res->start,
1056 (unsigned long long)res->end);
1057 }
1058 }
1059}
1060
1061void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1062{
1063 /* Fix up the bus resources for P2P bridges */
1064 if (bus->self != NULL)
1065 pcibios_fixup_bridge(bus);
1066}
1067
1068void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1069{
1070 struct pci_dev *dev;
1071
1072 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1073 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1074
1075 list_for_each_entry(dev, &bus->devices, bus_list) {
1076 struct dev_archdata *sd = &dev->dev.archdata;
1077
1078 /* Setup OF node pointer in archdata */
1079 sd->of_node = pci_device_to_OF_node(dev);
1080
1081 /* Fixup NUMA node as it may not be setup yet by the generic
1082 * code and is needed by the DMA init
1083 */
1084 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1085
1086 /* Hook up default DMA ops */
1087 sd->dma_ops = pci_dma_ops;
1088 sd->dma_data = (void *)PCI_DRAM_OFFSET;
1089
1090 /* Read default IRQs and fixup if necessary */
1091 pci_read_irq_line(dev);
1092 }
1093}
1094
1095void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1096{
1097 /* When called from the generic PCI probe, read PCI<->PCI bridge
1098 * bases. This is -not- called when generating the PCI tree from
1099 * the OF device-tree.
1100 */
1101 if (bus->self != NULL)
1102 pci_read_bridge_bases(bus);
1103
1104 /* Now fixup the bus bus */
1105 pcibios_setup_bus_self(bus);
1106
1107 /* Now fixup devices on that bus */
1108 pcibios_setup_bus_devices(bus);
1109}
1110EXPORT_SYMBOL(pcibios_fixup_bus);
1111
1112static int skip_isa_ioresource_align(struct pci_dev *dev)
1113{
1114 if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) &&
1115 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1116 return 1;
1117 return 0;
1118}
1119
1120/*
1121 * We need to avoid collisions with `mirrored' VGA ports
1122 * and other strange ISA hardware, so we always want the
1123 * addresses to be allocated in the 0x000-0x0ff region
1124 * modulo 0x400.
1125 *
1126 * Why? Because some silly external IO cards only decode
1127 * the low 10 bits of the IO address. The 0x00-0xff region
1128 * is reserved for motherboard devices that decode all 16
1129 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1130 * but we want to try to avoid allocating at 0x2900-0x2bff
1131 * which might have be mirrored at 0x0100-0x03ff..
1132 */
1133void pcibios_align_resource(void *data, struct resource *res,
1134 resource_size_t size, resource_size_t align)
1135{
1136 struct pci_dev *dev = data;
1137
1138 if (res->flags & IORESOURCE_IO) {
1139 resource_size_t start = res->start;
1140
1141 if (skip_isa_ioresource_align(dev))
1142 return;
1143 if (start & 0x300) {
1144 start = (start + 0x3ff) & ~0x3ff;
1145 res->start = start;
1146 }
1147 }
1148}
1149EXPORT_SYMBOL(pcibios_align_resource);
1150
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success, -1 if res does not completely contain every
 * conflicting child or if no child conflicts at all.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Find the run of children overlapping res.  The early `break'
	 * assumes the child list is address-ordered, as the resource
	 * core maintains it.
	 */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* remember first conflicting child */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into parent's child list in place of the conflicting
	 * run, then hang that run off res as its children.  After the
	 * loop, pp points at the link following the run.
	 */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
1187
1188/*
1189 * Handle resources of PCI devices. If the world were perfect, we could
1190 * just allocate all the resource regions and do nothing more. It isn't.
1191 * On the other hand, we cannot just re-allocate all devices, as it would
1192 * require us to know lots of host bridge internals. So we attempt to
1193 * keep as much of the original configuration as possible, but tweak it
1194 * when it's found to be wrong.
1195 *
1196 * Known BIOS problems we have to work around:
1197 * - I/O or memory regions not configured
1198 * - regions configured, but not enabled in the command register
1199 * - bogus I/O addresses above 64K used
1200 * - expansion ROMs left enabled (this may sound harmless, but given
1201 * the fact the PCI specs explicitly allow address decoders to be
1202 * shared between expansion ROMs and other resource regions, it's
1203 * at least dangerous)
1204 *
1205 * Our solution:
1206 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1207 * This gives us fixed barriers on where we can allocate.
1208 * (2) Allocate resources for all enabled devices. If there is
1209 * a collision, just mark the resource as unallocated. Also
1210 * disable expansion ROMs during this step.
1211 * (3) Try to allocate resources for disabled devices. If the
1212 * resources were assigned correctly, everything goes well,
1213 * if they weren't, they won't disturb allocation of other
1214 * resources.
1215 * (4) Assign new addresses to resources which were either
1216 * not configured at all or misconfigured. If explicitly
1217 * requested by the user, configure expansion ROM address
1218 * as well.
1219 */
1220
/* Recursively claim the window resources of @bus (and its children)
 * with the resource core.  Windows that cannot be claimed — or any
 * non-root window when PCI_REASSIGN_ALL_RSRC is set — have their flags
 * cleared so they get re-assigned later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
		res = bus->resource[i];
		/* Skip empty, invalid or already-claimed windows */
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			/* Root bus windows live directly under the global
			 * IO / memory roots
			 */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* Don't bother with non-root busses when
			 * re-assigning all resources. We clear the
			 * resource flags as if they were colliding
			 * and as such ensure proper re-allocation
			 * later.
			 */
			if (pci_flags & PCI_REASSIGN_ALL_RSRC)
				goto clear_resource;
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			 "[0x%x], parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		printk(KERN_WARNING "PCI: Cannot allocate resource region "
		       "%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
		res->flags = 0;
	}

	/* Recurse into child busses */
	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1286
1287static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1288{
1289 struct resource *pr, *r = &dev->resource[idx];
1290
1291 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1292 pci_name(dev), idx,
1293 (unsigned long long)r->start,
1294 (unsigned long long)r->end,
1295 (unsigned int)r->flags);
1296
1297 pr = pci_find_parent_resource(dev, r);
1298 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1299 request_resource(pr, r) < 0) {
1300 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1301 " of device %s, will remap\n", idx, pci_name(dev));
1302 if (pr)
1303 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1304 pr,
1305 (unsigned long long)pr->start,
1306 (unsigned long long)pr->end,
1307 (unsigned int)pr->flags);
1308 /* We'll assign a new address later */
1309 r->flags |= IORESOURCE_UNSET;
1310 r->end -= r->start;
1311 r->start = 0;
1312 }
1313}
1314
1315static void __init pcibios_allocate_resources(int pass)
1316{
1317 struct pci_dev *dev = NULL;
1318 int idx, disabled;
1319 u16 command;
1320 struct resource *r;
1321
1322 for_each_pci_dev(dev) {
1323 pci_read_config_word(dev, PCI_COMMAND, &command);
1324 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1325 r = &dev->resource[idx];
1326 if (r->parent) /* Already allocated */
1327 continue;
1328 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1329 continue; /* Not assigned at all */
1330 /* We only allocate ROMs on pass 1 just in case they
1331 * have been screwed up by firmware
1332 */
1333 if (idx == PCI_ROM_RESOURCE)
1334 disabled = 1;
1335 if (r->flags & IORESOURCE_IO)
1336 disabled = !(command & PCI_COMMAND_IO);
1337 else
1338 disabled = !(command & PCI_COMMAND_MEMORY);
1339 if (pass == disabled)
1340 alloc_resource(dev, idx);
1341 }
1342 if (pass)
1343 continue;
1344 r = &dev->resource[PCI_ROM_RESOURCE];
1345 if (r->flags) {
1346 /* Turn the ROM off, leave the resource region,
1347 * but keep it unregistered.
1348 */
1349 u32 reg;
1350 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1351 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1352 pr_debug("PCI: Switching off ROM of %s\n",
1353 pci_name(dev));
1354 r->flags &= ~IORESOURCE_ROM_ENABLE;
1355 pci_write_config_dword(dev, dev->rom_base_reg,
1356 reg & ~PCI_ROM_ADDRESS_ENABLE);
1357 }
1358 }
1359 }
1360}
1361
/* Reserve the legacy ISA IO page (bus addresses 0x0-0xfff) and the VGA
 * memory window (bus 0xa0000-0xbffff) within @bus' host bridge
 * resources, so later resource assignment does not allocate devices on
 * top of them.  Failure to reserve is only logged.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	/* Translate bus IO address 0 into resource space */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		/* Freed on failure; on success the resource tree owns it */
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory: find a host window covering the whole VGA
	 * range 0xa0000-0xbffff in bus address space
	 */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;		/* no window covers the VGA range */
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1419
1420void __init pcibios_resource_survey(void)
1421{
1422 struct pci_bus *b;
1423
1424 /* Allocate and assign resources. If we re-assign everything, then
1425 * we skip the allocate phase
1426 */
1427 list_for_each_entry(b, &pci_root_buses, node)
1428 pcibios_allocate_bus_resources(b);
1429
1430 if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) {
1431 pcibios_allocate_resources(0);
1432 pcibios_allocate_resources(1);
1433 }
1434
1435 /* Before we start assigning unassigned resource, we try to reserve
1436 * the low IO area and the VGA memory area if they intersect the
1437 * bus available resources to avoid allocating things on top of them
1438 */
1439 if (!(pci_flags & PCI_PROBE_ONLY)) {
1440 list_for_each_entry(b, &pci_root_buses, node)
1441 pcibios_reserve_legacy_regions(b);
1442 }
1443
1444 /* Now, if the platform didn't decide to blindly trust the firmware,
1445 * we proceed to assigning things that were left unassigned
1446 */
1447 if (!(pci_flags & PCI_PROBE_ONLY)) {
1448 pr_debug("PCI: Assigning unassigned resources...\n");
1449 pci_assign_unassigned_resources();
1450 }
1451}
1452
1453#ifdef CONFIG_HOTPLUG
1454
1455/* This is used by the PCI hotplug driver to allocate resource
1456 * of newly plugged busses. We can try to consolidate with the
1457 * rest of the code later, for now, keep it as-is as our main
1458 * resource allocation function doesn't deal with sub-trees yet.
1459 */
1460void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1461{
1462 struct pci_dev *dev;
1463 struct pci_bus *child_bus;
1464
1465 list_for_each_entry(dev, &bus->devices, bus_list) {
1466 int i;
1467
1468 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1469 struct resource *r = &dev->resource[i];
1470
1471 if (r->parent || !r->start || !r->flags)
1472 continue;
1473
1474 pr_debug("PCI: Claiming %s: "
1475 "Resource %d: %016llx..%016llx [%x]\n",
1476 pci_name(dev), i,
1477 (unsigned long long)r->start,
1478 (unsigned long long)r->end,
1479 (unsigned int)r->flags);
1480
1481 pci_claim_resource(dev, i);
1482 }
1483 }
1484
1485 list_for_each_entry(child_bus, &bus->children, node)
1486 pcibios_claim_one_bus(child_bus);
1487}
1488EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1489
1490
/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus, this include calling it for a PHB that is just
 * being added
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists. Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1513
1514#endif /* CONFIG_HOTPLUG */
1515
/* Arch hook for pci_enable_device(): defer entirely to the generic
 * helper, which enables decoding of the resources selected by @mask.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
1520
/* Hook a host bridge's IO and memory resources onto its root bus,
 * synthesizing fallback resources if the firmware left them unset.
 */
void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
{
	struct pci_bus *bus = hose->bus;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	bus->resource[0] = res = &hose->io_resource;

	if (!res->flags) {
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}

	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources: only resource 0 gets a fallback,
	 * the other two are skipped when unset
	 */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			printk(KERN_ERR "PCI: Memory resource 0 not set for "
			       "host bridge %s (domain %d)\n",
			       hose->dn->full_name, hose->global_number);

			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		bus->resource[i+1] = res;

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO offset = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
1574
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.  Every access simply reports "device not found".
 */
/* Generates a per-size null accessor; not instantiated in this file
 * (the bus-level null_read_config/null_write_config below are what
 * null_pci_ops actually uses).
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

/* Bus-level null read: always fails with DEVICE_NOT_FOUND */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Bus-level null write: always fails with DEVICE_NOT_FOUND */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Ops table used by fake_pci_bus() when no hose is available */
static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
1604
/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	/* NOTE: a single static bus is shared by all callers and only
	 * number/sysdata/ops are refreshed per call — early boot only,
	 * not safe for concurrent use.
	 */
	static struct pci_bus bus;

	if (!hose)
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	/* Fall back to the always-failing null ops without a hose */
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
1622
/* Generate early config accessors (early_read_config_byte() etc.) that
 * route through fake_pci_bus() so they work before bus structures exist.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1637
/* Early capability lookup: like pci_bus_find_capability() but usable
 * before pci_bus structures exist, via fake_pci_bus().
 */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
new file mode 100644
index 000000000000..7e0c94f501cc
--- /dev/null
+++ b/arch/microblaze/pci/pci_32.c
@@ -0,0 +1,430 @@
1/*
2 * Common pmac/prep/chrp pci routines. -- Cort
3 */
4
5#include <linux/kernel.h>
6#include <linux/pci.h>
7#include <linux/delay.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/capability.h>
11#include <linux/sched.h>
12#include <linux/errno.h>
13#include <linux/bootmem.h>
14#include <linux/irq.h>
15#include <linux/list.h>
16#include <linux/of.h>
17
18#include <asm/processor.h>
19#include <asm/io.h>
20#include <asm/prom.h>
21#include <asm/sections.h>
22#include <asm/pci-bridge.h>
23#include <asm/byteorder.h>
24#include <asm/uaccess.h>
25
26#undef DEBUG
27
/* Base of the ISA I/O window; used when rebasing hose I/O resources in
 * pcibios_scan_phb() and reported via sys_pciconfig_iobase(). */
unsigned long isa_io_base;
/* Offset between CPU and PCI views of DRAM -- TODO confirm users;
 * nothing in this file reads it. */
unsigned long pci_dram_offset;
/* Gap added between the bus ranges of successive host bridges when
 * renumbering (see pcibios_init()). */
int pcibios_assign_bus_offset = 1;

/* kernel-bus-number -> OF-bus-number map, allocated in
 * pcibios_make_OF_bus_map(); 0xff marks an unmapped entry. */
static u8 *pci_to_OF_bus_map;

/* By default, we don't re-assign bus numbers. We do this only on
 * some pmacs
 */
static int pci_assign_all_buses;

/* Total number of kernel PCI bus numbers after the initial scan. */
static int pci_bus_count;
40
41/*
42 * Functions below are used on OpenFirmware machines.
43 */
44static void
45make_one_node_map(struct device_node *node, u8 pci_bus)
46{
47 const int *bus_range;
48 int len;
49
50 if (pci_bus >= pci_bus_count)
51 return;
52 bus_range = of_get_property(node, "bus-range", &len);
53 if (bus_range == NULL || len < 2 * sizeof(int)) {
54 printk(KERN_WARNING "Can't get bus-range for %s, "
55 "assuming it starts at 0\n", node->full_name);
56 pci_to_OF_bus_map[pci_bus] = 0;
57 } else
58 pci_to_OF_bus_map[pci_bus] = bus_range[0];
59
60 for_each_child_of_node(node, node) {
61 struct pci_dev *dev;
62 const unsigned int *class_code, *reg;
63
64 class_code = of_get_property(node, "class-code", NULL);
65 if (!class_code ||
66 ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
67 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
68 continue;
69 reg = of_get_property(node, "reg", NULL);
70 if (!reg)
71 continue;
72 dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
73 if (!dev || !dev->subordinate) {
74 pci_dev_put(dev);
75 continue;
76 }
77 make_one_node_map(node, dev->subordinate->number);
78 pci_dev_put(dev);
79 }
80}
81
/*
 * pcibios_make_OF_bus_map - build the kernel -> OF bus number map.
 *
 * Allocates pci_to_OF_bus_map[] (one byte per kernel bus number),
 * fills it by walking every host bridge's device-tree node, and, if
 * the root node carries a "pci-OF-bus-map" property (pre-created by
 * pci_create_OF_bus_map()), copies the map into that property so it
 * is visible in /proc/device-tree.
 */
void
pcibios_make_OF_bus_map(void)
{
	int i;
	struct pci_controller *hose, *tmp;
	struct property *map_prop;
	struct device_node *dn;

	pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
	if (!pci_to_OF_bus_map) {
		printk(KERN_ERR "Can't allocate OF bus map !\n");
		return;
	}

	/* We fill the bus map with invalid values, that helps
	 * debugging.
	 */
	for (i = 0; i < pci_bus_count; i++)
		pci_to_OF_bus_map[i] = 0xff;

	/* For each hose, we begin searching bridges */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		struct device_node *node = hose->dn;

		if (!node)
			continue;
		make_one_node_map(node, hose->first_busno);
	}
	dn = of_find_node_by_path("/");
	map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
	if (map_prop) {
		/* Property was pre-sized to 256 bytes by
		 * pci_create_OF_bus_map(); more buses than that is a bug. */
		BUG_ON(pci_bus_count > map_prop->length);
		memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
	}
	of_node_put(dn);
#ifdef DEBUG
	printk(KERN_INFO "PCI->OF bus map:\n");
	for (i = 0; i < pci_bus_count; i++) {
		if (pci_to_OF_bus_map[i] == 0xff)
			continue;
		printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
	}
#endif
}
126
/* Callback type for scan_OF_pci_childs(): return non-zero to stop the
 * scan and report the current node as a match. */
typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);

/*
 * Depth-first scan of the PCI part of the device tree under @parent,
 * calling @filter on each node.  Returns the first node for which
 * @filter returns non-zero, or NULL.  Only recurses through PCI-PCI
 * bridges, CardBus bridges, and "multifunc-device" container nodes.
 *
 * NOTE(review): the iteration reference is dropped (of_node_put)
 * before the matching node is returned, so the caller receives a
 * pointer without holding a reference -- confirm callers expect that.
 */
static struct device_node *scan_OF_pci_childs(struct device_node *parent,
					pci_OF_scan_iterator filter, void *data)
{
	struct device_node *node;
	struct device_node *sub_node;

	for_each_child_of_node(parent, node) {
		const unsigned int *class_code;

		if (filter(node, data)) {
			of_node_put(node);
			return node;
		}

		/* For PCI<->PCI bridges or CardBus bridges, we go down
		 * Note: some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function device,
		 * we go down them as well.
		 */
		class_code = of_get_property(node, "class-code", NULL);
		if ((!class_code ||
		     ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
		      (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
		    strcmp(node->name, "multifunc-device"))
			continue;
		sub_node = scan_OF_pci_childs(node, filter, data);
		if (sub_node) {
			of_node_put(node);
			return sub_node;
		}
	}
	return NULL;
}
162
163static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
164 unsigned int devfn)
165{
166 struct device_node *np, *cnp;
167 const u32 *reg;
168 unsigned int psize;
169
170 for_each_child_of_node(parent, np) {
171 reg = of_get_property(np, "reg", &psize);
172 if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
173 return np;
174
175 /* Note: some OFs create a parent node "multifunc-device" as
176 * a fake root for all functions of a multi-function device,
177 * we go down them as well. */
178 if (!strcmp(np->name, "multifunc-device")) {
179 cnp = scan_OF_for_pci_dev(np, devfn);
180 if (cnp)
181 return cnp;
182 }
183 }
184 return NULL;
185}
186
187
188static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
189{
190 struct device_node *parent, *np;
191
192 /* Are we a root bus ? */
193 if (bus->self == NULL || bus->parent == NULL) {
194 struct pci_controller *hose = pci_bus_to_host(bus);
195 if (hose == NULL)
196 return NULL;
197 return of_node_get(hose->dn);
198 }
199
200 /* not a root bus, we need to get our parent */
201 parent = scan_OF_for_pci_bus(bus->parent);
202 if (parent == NULL)
203 return NULL;
204
205 /* now iterate for children for a match */
206 np = scan_OF_for_pci_dev(parent, bus->self->devfn);
207 of_node_put(parent);
208
209 return np;
210}
211
212/*
213 * Scans the OF tree for a device node matching a PCI device
214 */
215struct device_node *
216pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
217{
218 struct device_node *parent, *np;
219
220 pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
221 parent = scan_OF_for_pci_bus(bus);
222 if (parent == NULL)
223 return NULL;
224 pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
225 np = scan_OF_for_pci_dev(parent, devfn);
226 of_node_put(parent);
227 pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
228
229 /* XXX most callers don't release the returned node
230 * mostly because ppc64 doesn't increase the refcount,
231 * we need to fix that.
232 */
233 return np;
234}
235EXPORT_SYMBOL(pci_busdev_to_OF_node);
236
237struct device_node*
238pci_device_to_OF_node(struct pci_dev *dev)
239{
240 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
241}
242EXPORT_SYMBOL(pci_device_to_OF_node);
243
/* scan_OF_pci_childs() callback: match on node identity with @data. */
static int
find_OF_pci_device_filter(struct device_node *node, void *data)
{
	return node == (struct device_node *)data;
}
249
/*
 * pci_device_from_OF_node - translate an OF node into a PCI (bus, devfn).
 * @node:  device-tree node believed to describe a PCI device
 * @bus:   output: kernel PCI bus number
 * @devfn: output: PCI device/function number
 *
 * Verifies @node really sits under a PCI host bridge, decodes its
 * "reg" property, and -- when buses have been renumbered -- converts
 * the OF bus number into the kernel bus number via pci_to_OF_bus_map.
 * Returns 0 on success, -ENODEV otherwise.
 */
int
pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
	const unsigned int *reg;
	struct pci_controller *hose;
	struct pci_dev *dev = NULL;

	/* Make sure it's really a PCI device */
	hose = pci_find_hose_for_OF_device(node);
	if (!hose || !hose->dn)
		return -ENODEV;
	if (!scan_OF_pci_childs(hose->dn,
			find_OF_pci_device_filter, (void *)node))
		return -ENODEV;
	reg = of_get_property(node, "reg", NULL);
	if (!reg)
		return -ENODEV;
	/* "reg" phys.hi word: bits 23:16 = bus, bits 15:8 = devfn. */
	*bus = (reg[0] >> 16) & 0xff;
	*devfn = ((reg[0] >> 8) & 0xff);

	/* Ok, here we need some tweak. If we have already renumbered
	 * all busses, we can't rely on the OF bus number any more.
	 * the pci_to_OF_bus_map is not enough as several PCI busses
	 * may match the same OF bus number.
	 */
	if (!pci_to_OF_bus_map)
		return 0;

	/* Search all devices for one whose OF bus number and devfn
	 * match; on success rewrite *bus with the kernel bus number. */
	for_each_pci_dev(dev)
		if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
		    dev->devfn == *devfn) {
			*bus = dev->bus->number;
			pci_dev_put(dev);
			return 0;
		}

	return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
292
293/* We create the "pci-OF-bus-map" property now so it appears in the
294 * /proc device tree
295 */
296void __init
297pci_create_OF_bus_map(void)
298{
299 struct property *of_prop;
300 struct device_node *dn;
301
302 of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
303 256);
304 if (!of_prop)
305 return;
306 dn = of_find_node_by_path("/");
307 if (dn) {
308 memset(of_prop, -1, sizeof(struct property) + 256);
309 of_prop->name = "pci-OF-bus-map";
310 of_prop->length = 256;
311 of_prop->value = &of_prop[1];
312 prom_add_property(dn, of_prop);
313 of_node_put(dn);
314 }
315}
316
/*
 * pcibios_scan_phb - create and scan the root bus of one host bridge.
 *
 * Creates an empty root pci_bus for @hose, rebases the hose's I/O
 * resource from virtual addresses, wires up the PHB resources and
 * scans the child buses, recording the highest bus number found in
 * hose->last_busno.
 */
static void __devinit pcibios_scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	unsigned long io_offset;
	struct resource *res = &hose->io_resource;

	pr_debug("PCI: Scanning PHB %s\n",
		 node ? node->full_name : "<NO NAME>");

	/* Create an empty bus for the toplevel */
	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* Fixup IO space offset: shift the resource by the distance
	 * between the hose's virtual I/O base and the global ISA base,
	 * truncated to 32 bits. */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	/* Wire up PHB bus resources */
	pcibios_setup_phb_resources(hose);

	/* Scan children */
	hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}
348
349static int __init pcibios_init(void)
350{
351 struct pci_controller *hose, *tmp;
352 int next_busno = 0;
353
354 printk(KERN_INFO "PCI: Probing PCI hardware\n");
355
356 if (pci_flags & PCI_REASSIGN_ALL_BUS) {
357 printk(KERN_INFO "setting pci_asign_all_busses\n");
358 pci_assign_all_buses = 1;
359 }
360
361 /* Scan all of the recorded PCI controllers. */
362 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
363 if (pci_assign_all_buses)
364 hose->first_busno = next_busno;
365 hose->last_busno = 0xff;
366 pcibios_scan_phb(hose);
367 printk(KERN_INFO "calling pci_bus_add_devices()\n");
368 pci_bus_add_devices(hose->bus);
369 if (pci_assign_all_buses || next_busno <= hose->last_busno)
370 next_busno = hose->last_busno + \
371 pcibios_assign_bus_offset;
372 }
373 pci_bus_count = next_busno;
374
375 /* OpenFirmware based machines need a map of OF bus
376 * numbers vs. kernel bus numbers since we may have to
377 * remap them.
378 */
379 if (pci_assign_all_buses)
380 pcibios_make_OF_bus_map();
381
382 /* Call common code to handle resource allocation */
383 pcibios_resource_survey();
384
385 return 0;
386}
387
388subsys_initcall(pcibios_init);
389
390static struct pci_controller*
391pci_bus_to_hose(int bus)
392{
393 struct pci_controller *hose, *tmp;
394
395 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
396 if (bus >= hose->first_busno && bus <= hose->last_busno)
397 return hose;
398 return NULL;
399}
400
401/* Provide information on locations of various I/O regions in physical
402 * memory. Do this on a per-card basis so that we choose the right
403 * root bridge.
404 * Note that the returned IO or memory base is a physical address
405 */
406
407long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
408{
409 struct pci_controller *hose;
410 long result = -EOPNOTSUPP;
411
412 hose = pci_bus_to_hose(bus);
413 if (!hose)
414 return -ENODEV;
415
416 switch (which) {
417 case IOBASE_BRIDGE_NUMBER:
418 return (long)hose->first_busno;
419 case IOBASE_MEMORY:
420 return (long)hose->pci_mem_offset;
421 case IOBASE_IO:
422 return (long)hose->io_base_phys;
423 case IOBASE_ISA_IO:
424 return (long)isa_io_base;
425 case IOBASE_ISA_MEM:
426 return (long)isa_mem_base;
427 }
428
429 return result;
430}
diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c
new file mode 100644
index 000000000000..7869a41b0f94
--- /dev/null
+++ b/arch/microblaze/pci/xilinx_pci.c
@@ -0,0 +1,168 @@
1/*
2 * PCI support for Xilinx plbv46_pci soft-core which can be used on
3 * Xilinx Virtex ML410 / ML510 boards.
4 *
5 * Copyright 2009 Roderick Colenbrander
6 * Copyright 2009 Secret Lab Technologies Ltd.
7 *
8 * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
9 * by Benjamin Herrenschmidt.
10 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
11 *
12 * This file is licensed under the terms of the GNU General Public License
13 * version 2. This program is licensed "as is" without any warranty of any
14 * kind, whether express or implied.
15 */
16
17#include <linux/ioport.h>
18#include <linux/of.h>
19#include <linux/pci.h>
20#include <asm/io.h>
21
22#define XPLB_PCI_ADDR 0x10c
23#define XPLB_PCI_DATA 0x110
24#define XPLB_PCI_BUS 0x114
25
26#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
27 PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
28
/* Device-tree match table for the Xilinx plbv46 PCI bridge.  Used both
 * at boot (xilinx_pci_init) and by the runtime header fixup
 * (xilinx_pci_fixup_bridge), so it cannot be __initdata. */
static struct of_device_id xilinx_pci_match[] = {
	{ .compatible = "xlnx,plbv46-pci-1.03.a", },
	{}
};
33
34/**
35 * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
36 */
37static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
38{
39 struct pci_controller *hose;
40 int i;
41
42 if (dev->devfn || dev->bus->self)
43 return;
44
45 hose = pci_bus_to_host(dev->bus);
46 if (!hose)
47 return;
48
49 if (!of_match_node(xilinx_pci_match, hose->dn))
50 return;
51
52 /* Hide the PCI host BARs from the kernel as their content doesn't
53 * fit well in the resource management
54 */
55 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
56 dev->resource[i].start = 0;
57 dev->resource[i].end = 0;
58 dev->resource[i].flags = 0;
59 }
60
61 dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
62 pci_name(dev));
63}
64DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
65
#ifdef DEBUG
/**
 * xilinx_pci_exclude_device - Don't do config access for non-root bus
 *
 * This is a hack. Config access to any bus other than bus 0 does not
 * currently work on the ML510 so we prevent it here.
 *
 * NOTE(review): not referenced anywhere else in this file -- confirm
 * something actually installs it as an exclude hook.
 */
static int
xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
{
	return (bus != 0);
}

/**
 * xilinx_early_pci_scan - List pci config space for available devices
 *
 * List pci devices in very early phase.  Dumps the first 64 config
 * bytes of each probed slot to the console; a slot whose first dword
 * reads 0xFFFFFFFF is reported as ABSENT.
 */
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
	u32 bus = 0;
	u32 val, dev, func, offset;

	/* Currently we have only 2 device connected - up-to 32 devices */
	for (dev = 0; dev < 2; dev++) {
		/* List only first function number - up-to 8 functions */
		for (func = 0; func < 1; func++) {
			printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
			/* read the first 64 standardized bytes */
			/* Up-to 192 bytes can be list of capabilities */
			for (offset = 0; offset < 64; offset += 4) {
				early_read_config_dword(hose, bus,
					PCI_DEVFN(dev, func), offset, &val);
				if (offset == 0 && val == 0xFFFFFFFF) {
					printk(KERN_CONT "\nABSENT");
					break;
				}
				/* New row every 16 bytes. */
				if (!(offset % 0x10))
					printk(KERN_CONT "\n%04x: ", offset);

				printk(KERN_CONT "%08x ", val);
			}
			printk(KERN_INFO "\n");
		}
	}
}
#else
/* Non-DEBUG build: the early scan is a no-op. */
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
}
#endif
117
/**
 * xilinx_pci_init - Find and register a Xilinx PCI host bridge
 *
 * Locates a plbv46-pci node in the device tree, sets up indirect
 * config-space access through the XPLB address/data registers, kicks
 * off the core's self-init by enabling bus mastering, and registers
 * the resulting host bridge with the PCI core.
 */
void __init xilinx_pci_init(void)
{
	struct pci_controller *hose;
	struct resource r;
	void __iomem *pci_reg;
	struct device_node *pci_node;

	pci_node = of_find_matching_node(NULL, xilinx_pci_match);
	if (!pci_node)
		return;

	if (of_address_to_resource(pci_node, 0, &r)) {
		pr_err("xilinx-pci: cannot resolve base address\n");
		return;
	}

	hose = pcibios_alloc_controller(pci_node);
	if (!hose) {
		pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
		return;
	}

	/* Setup config space */
	setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
			   r.start + XPLB_PCI_DATA,
			   INDIRECT_TYPE_SET_CFG_TYPE);

	/* According to the xilinx plbv46_pci documentation the soft-core starts
	 * a self-init when the bus master enable bit is set. Without this bit
	 * set the pci bus can't be scanned.
	 */
	early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);

	/* Set the max latency timer to 255 */
	early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);

	/* Set the max bus number to 255, and bus/subbus no's to 0
	 * NOTE(review): the result of of_iomap() is not checked for NULL
	 * before being dereferenced -- confirm mapping cannot fail here.
	 */
	pci_reg = of_iomap(pci_node, 0);
	out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
	iounmap(pci_reg);

	/* Register the host bridge with the linux kernel!
	 * NOTE(review): INDIRECT_TYPE_SET_CFG_TYPE is passed as the third
	 * ("primary") argument -- verify this is intentional and not a
	 * copy of the setup_indirect_pci() flag above.
	 */
	pci_process_bridge_OF_ranges(hose, pci_node,
				     INDIRECT_TYPE_SET_CFG_TYPE);

	pr_info("xilinx-pci: Registered PCI host bridge\n");
	xilinx_early_pci_scan(hose);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8a54eb8e3768..2e19500921f9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU
313 313
314 It is recommended that you build a soft-float userspace instead. 314 It is recommended that you build a soft-float userspace instead.
315 315
316config IOMMU_VMERGE
317 bool "Enable IOMMU virtual merging"
318 depends on PPC64
319 default y
320 help
321 Cause IO segments sent to a device for DMA to be merged virtually
322 by the IOMMU when they happen to have been allocated contiguously.
323 This doesn't add pressure to the IOMMU allocator. However, some
324 drivers don't support getting large merged segments coming back
325 from *_map_sg().
326
327 Most drivers don't have this problem; it is safe to say Y here.
328
329config IOMMU_HELPER 316config IOMMU_HELPER
330 def_bool PPC64 317 def_bool PPC64
331 318
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index ff9bdb28197d..218d49b36a0c 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 14:45:07 2009 4# Wed Mar 10 14:34:22 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32
94# CONFIG_TREE_RCU_TRACE is not set 94# CONFIG_TREE_RCU_TRACE is not set
95# CONFIG_IKCONFIG is not set 95# CONFIG_IKCONFIG is not set
96CONFIG_LOG_BUF_SHIFT=14 96CONFIG_LOG_BUF_SHIFT=14
97CONFIG_GROUP_SCHED=y
98CONFIG_FAIR_GROUP_SCHED=y
99# CONFIG_RT_GROUP_SCHED is not set
100CONFIG_USER_SCHED=y
101# CONFIG_CGROUP_SCHED is not set
102# CONFIG_CGROUPS is not set 97# CONFIG_CGROUPS is not set
103CONFIG_SYSFS_DEPRECATED=y 98CONFIG_SYSFS_DEPRECATED=y
104CONFIG_SYSFS_DEPRECATED_V2=y 99CONFIG_SYSFS_DEPRECATED_V2=y
@@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE=""
109CONFIG_RD_GZIP=y 104CONFIG_RD_GZIP=y
110# CONFIG_RD_BZIP2 is not set 105# CONFIG_RD_BZIP2 is not set
111# CONFIG_RD_LZMA is not set 106# CONFIG_RD_LZMA is not set
107# CONFIG_RD_LZO is not set
112# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 108# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
113CONFIG_SYSCTL=y 109CONFIG_SYSCTL=y
114CONFIG_ANON_INODES=y 110CONFIG_ANON_INODES=y
@@ -340,7 +336,6 @@ CONFIG_NET=y
340# Networking options 336# Networking options
341# 337#
342CONFIG_PACKET=y 338CONFIG_PACKET=y
343# CONFIG_PACKET_MMAP is not set
344CONFIG_UNIX=y 339CONFIG_UNIX=y
345CONFIG_XFRM=y 340CONFIG_XFRM=y
346CONFIG_XFRM_USER=y 341CONFIG_XFRM_USER=y
@@ -517,6 +512,8 @@ CONFIG_MTD_PHYSMAP_OF=y
517# UBI - Unsorted block images 512# UBI - Unsorted block images
518# 513#
519# CONFIG_MTD_UBI is not set 514# CONFIG_MTD_UBI is not set
515CONFIG_OF_FLATTREE=y
516CONFIG_OF_DYNAMIC=y
520CONFIG_OF_DEVICE=y 517CONFIG_OF_DEVICE=y
521CONFIG_OF_I2C=y 518CONFIG_OF_I2C=y
522CONFIG_OF_MDIO=y 519CONFIG_OF_MDIO=y
@@ -684,6 +681,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
684CONFIG_SERIAL_MPC52xx=y 681CONFIG_SERIAL_MPC52xx=y
685CONFIG_SERIAL_MPC52xx_CONSOLE=y 682CONFIG_SERIAL_MPC52xx_CONSOLE=y
686CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=57600 683CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=57600
684# CONFIG_SERIAL_TIMBERDALE is not set
687# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 685# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
688CONFIG_UNIX98_PTYS=y 686CONFIG_UNIX98_PTYS=y
689# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 687# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -714,6 +712,7 @@ CONFIG_I2C_HELPER_AUTO=y
714CONFIG_I2C_MPC=y 712CONFIG_I2C_MPC=y
715# CONFIG_I2C_OCORES is not set 713# CONFIG_I2C_OCORES is not set
716# CONFIG_I2C_SIMTEC is not set 714# CONFIG_I2C_SIMTEC is not set
715# CONFIG_I2C_XILINX is not set
717 716
718# 717#
719# External I2C/SMBus adapter drivers 718# External I2C/SMBus adapter drivers
@@ -754,6 +753,7 @@ CONFIG_WATCHDOG=y
754# Watchdog Device Drivers 753# Watchdog Device Drivers
755# 754#
756# CONFIG_SOFT_WATCHDOG is not set 755# CONFIG_SOFT_WATCHDOG is not set
756# CONFIG_MAX63XX_WATCHDOG is not set
757# CONFIG_MPC5200_WDT is not set 757# CONFIG_MPC5200_WDT is not set
758 758
759# 759#
@@ -771,18 +771,20 @@ CONFIG_SSB_POSSIBLE=y
771# Multifunction device drivers 771# Multifunction device drivers
772# 772#
773# CONFIG_MFD_CORE is not set 773# CONFIG_MFD_CORE is not set
774# CONFIG_MFD_88PM860X is not set
774# CONFIG_MFD_SM501 is not set 775# CONFIG_MFD_SM501 is not set
775# CONFIG_HTC_PASIC3 is not set 776# CONFIG_HTC_PASIC3 is not set
776# CONFIG_TWL4030_CORE is not set 777# CONFIG_TWL4030_CORE is not set
777# CONFIG_MFD_TMIO is not set 778# CONFIG_MFD_TMIO is not set
778# CONFIG_PMIC_DA903X is not set 779# CONFIG_PMIC_DA903X is not set
779# CONFIG_PMIC_ADP5520 is not set 780# CONFIG_PMIC_ADP5520 is not set
781# CONFIG_MFD_MAX8925 is not set
780# CONFIG_MFD_WM8400 is not set 782# CONFIG_MFD_WM8400 is not set
781# CONFIG_MFD_WM831X is not set 783# CONFIG_MFD_WM831X is not set
782# CONFIG_MFD_WM8350_I2C is not set 784# CONFIG_MFD_WM8350_I2C is not set
785# CONFIG_MFD_WM8994 is not set
783# CONFIG_MFD_PCF50633 is not set 786# CONFIG_MFD_PCF50633 is not set
784# CONFIG_AB3100_CORE is not set 787# CONFIG_AB3100_CORE is not set
785# CONFIG_MFD_88PM8607 is not set
786# CONFIG_REGULATOR is not set 788# CONFIG_REGULATOR is not set
787# CONFIG_MEDIA_SUPPORT is not set 789# CONFIG_MEDIA_SUPPORT is not set
788 790
@@ -813,7 +815,6 @@ CONFIG_USB=y
813CONFIG_USB_DEVICEFS=y 815CONFIG_USB_DEVICEFS=y
814# CONFIG_USB_DEVICE_CLASS is not set 816# CONFIG_USB_DEVICE_CLASS is not set
815# CONFIG_USB_DYNAMIC_MINORS is not set 817# CONFIG_USB_DYNAMIC_MINORS is not set
816# CONFIG_USB_SUSPEND is not set
817# CONFIG_USB_OTG is not set 818# CONFIG_USB_OTG is not set
818# CONFIG_USB_OTG_WHITELIST is not set 819# CONFIG_USB_OTG_WHITELIST is not set
819# CONFIG_USB_OTG_BLACKLIST_HUB is not set 820# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -891,7 +892,6 @@ CONFIG_USB_STORAGE=y
891# CONFIG_USB_RIO500 is not set 892# CONFIG_USB_RIO500 is not set
892# CONFIG_USB_LEGOTOWER is not set 893# CONFIG_USB_LEGOTOWER is not set
893# CONFIG_USB_LCD is not set 894# CONFIG_USB_LCD is not set
894# CONFIG_USB_BERRY_CHARGE is not set
895# CONFIG_USB_LED is not set 895# CONFIG_USB_LED is not set
896# CONFIG_USB_CYPRESS_CY7C63 is not set 896# CONFIG_USB_CYPRESS_CY7C63 is not set
897# CONFIG_USB_CYTHERM is not set 897# CONFIG_USB_CYTHERM is not set
@@ -903,7 +903,6 @@ CONFIG_USB_STORAGE=y
903# CONFIG_USB_IOWARRIOR is not set 903# CONFIG_USB_IOWARRIOR is not set
904# CONFIG_USB_TEST is not set 904# CONFIG_USB_TEST is not set
905# CONFIG_USB_ISIGHTFW is not set 905# CONFIG_USB_ISIGHTFW is not set
906# CONFIG_USB_VST is not set
907# CONFIG_USB_GADGET is not set 906# CONFIG_USB_GADGET is not set
908 907
909# 908#
@@ -1009,6 +1008,7 @@ CONFIG_JFFS2_ZLIB=y
1009# CONFIG_JFFS2_LZO is not set 1008# CONFIG_JFFS2_LZO is not set
1010CONFIG_JFFS2_RTIME=y 1009CONFIG_JFFS2_RTIME=y
1011# CONFIG_JFFS2_RUBIN is not set 1010# CONFIG_JFFS2_RUBIN is not set
1011# CONFIG_LOGFS is not set
1012CONFIG_CRAMFS=y 1012CONFIG_CRAMFS=y
1013# CONFIG_SQUASHFS is not set 1013# CONFIG_SQUASHFS is not set
1014# CONFIG_VXFS_FS is not set 1014# CONFIG_VXFS_FS is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 7b3f4d0ed404..90492ff25232 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 14:45:09 2009 4# Wed Mar 10 14:34:24 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -95,11 +95,6 @@ CONFIG_RCU_FANOUT=32
95# CONFIG_TREE_RCU_TRACE is not set 95# CONFIG_TREE_RCU_TRACE is not set
96# CONFIG_IKCONFIG is not set 96# CONFIG_IKCONFIG is not set
97CONFIG_LOG_BUF_SHIFT=14 97CONFIG_LOG_BUF_SHIFT=14
98CONFIG_GROUP_SCHED=y
99# CONFIG_FAIR_GROUP_SCHED is not set
100# CONFIG_RT_GROUP_SCHED is not set
101CONFIG_USER_SCHED=y
102# CONFIG_CGROUP_SCHED is not set
103# CONFIG_CGROUPS is not set 98# CONFIG_CGROUPS is not set
104CONFIG_SYSFS_DEPRECATED=y 99CONFIG_SYSFS_DEPRECATED=y
105CONFIG_SYSFS_DEPRECATED_V2=y 100CONFIG_SYSFS_DEPRECATED_V2=y
@@ -110,6 +105,7 @@ CONFIG_INITRAMFS_SOURCE=""
110CONFIG_RD_GZIP=y 105CONFIG_RD_GZIP=y
111# CONFIG_RD_BZIP2 is not set 106# CONFIG_RD_BZIP2 is not set
112# CONFIG_RD_LZMA is not set 107# CONFIG_RD_LZMA is not set
108# CONFIG_RD_LZO is not set
113# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 109# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
114CONFIG_SYSCTL=y 110CONFIG_SYSCTL=y
115CONFIG_ANON_INODES=y 111CONFIG_ANON_INODES=y
@@ -317,6 +313,7 @@ CONFIG_SUSPEND=y
317CONFIG_SUSPEND_FREEZER=y 313CONFIG_SUSPEND_FREEZER=y
318# CONFIG_HIBERNATION is not set 314# CONFIG_HIBERNATION is not set
319# CONFIG_PM_RUNTIME is not set 315# CONFIG_PM_RUNTIME is not set
316CONFIG_PM_OPS=y
320CONFIG_SECCOMP=y 317CONFIG_SECCOMP=y
321CONFIG_ISA_DMA_API=y 318CONFIG_ISA_DMA_API=y
322 319
@@ -333,7 +330,6 @@ CONFIG_PCI_SYSCALL=y
333# CONFIG_PCIEPORTBUS is not set 330# CONFIG_PCIEPORTBUS is not set
334CONFIG_ARCH_SUPPORTS_MSI=y 331CONFIG_ARCH_SUPPORTS_MSI=y
335# CONFIG_PCI_MSI is not set 332# CONFIG_PCI_MSI is not set
336CONFIG_PCI_LEGACY=y
337# CONFIG_PCI_DEBUG is not set 333# CONFIG_PCI_DEBUG is not set
338# CONFIG_PCI_STUB is not set 334# CONFIG_PCI_STUB is not set
339# CONFIG_PCI_IOV is not set 335# CONFIG_PCI_IOV is not set
@@ -360,7 +356,6 @@ CONFIG_NET=y
360# Networking options 356# Networking options
361# 357#
362CONFIG_PACKET=y 358CONFIG_PACKET=y
363# CONFIG_PACKET_MMAP is not set
364CONFIG_UNIX=y 359CONFIG_UNIX=y
365CONFIG_XFRM=y 360CONFIG_XFRM=y
366CONFIG_XFRM_USER=m 361CONFIG_XFRM_USER=m
@@ -457,6 +452,8 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
457# CONFIG_SYS_HYPERVISOR is not set 452# CONFIG_SYS_HYPERVISOR is not set
458# CONFIG_CONNECTOR is not set 453# CONFIG_CONNECTOR is not set
459# CONFIG_MTD is not set 454# CONFIG_MTD is not set
455CONFIG_OF_FLATTREE=y
456CONFIG_OF_DYNAMIC=y
460CONFIG_OF_DEVICE=y 457CONFIG_OF_DEVICE=y
461CONFIG_OF_I2C=y 458CONFIG_OF_I2C=y
462CONFIG_OF_MDIO=y 459CONFIG_OF_MDIO=y
@@ -631,6 +628,7 @@ CONFIG_ATA_SFF=y
631# CONFIG_PATA_IT821X is not set 628# CONFIG_PATA_IT821X is not set
632# CONFIG_PATA_IT8213 is not set 629# CONFIG_PATA_IT8213 is not set
633# CONFIG_PATA_JMICRON is not set 630# CONFIG_PATA_JMICRON is not set
631# CONFIG_PATA_LEGACY is not set
634# CONFIG_PATA_TRIFLEX is not set 632# CONFIG_PATA_TRIFLEX is not set
635# CONFIG_PATA_MARVELL is not set 633# CONFIG_PATA_MARVELL is not set
636CONFIG_PATA_MPC52xx=y 634CONFIG_PATA_MPC52xx=y
@@ -668,7 +666,7 @@ CONFIG_PATA_MPC52xx=y
668# 666#
669 667
670# 668#
671# See the help texts for more information. 669# The newer stack is recommended.
672# 670#
673# CONFIG_FIREWIRE is not set 671# CONFIG_FIREWIRE is not set
674# CONFIG_IEEE1394 is not set 672# CONFIG_IEEE1394 is not set
@@ -768,6 +766,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
768# CONFIG_MLX4_CORE is not set 766# CONFIG_MLX4_CORE is not set
769# CONFIG_TEHUTI is not set 767# CONFIG_TEHUTI is not set
770# CONFIG_BNX2X is not set 768# CONFIG_BNX2X is not set
769# CONFIG_QLCNIC is not set
771# CONFIG_QLGE is not set 770# CONFIG_QLGE is not set
772# CONFIG_SFC is not set 771# CONFIG_SFC is not set
773# CONFIG_BE2NET is not set 772# CONFIG_BE2NET is not set
@@ -828,6 +827,7 @@ CONFIG_SERIAL_MPC52xx=y
828CONFIG_SERIAL_MPC52xx_CONSOLE=y 827CONFIG_SERIAL_MPC52xx_CONSOLE=y
829CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 828CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
830# CONFIG_SERIAL_JSM is not set 829# CONFIG_SERIAL_JSM is not set
830# CONFIG_SERIAL_TIMBERDALE is not set
831# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 831# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
832CONFIG_UNIX98_PTYS=y 832CONFIG_UNIX98_PTYS=y
833# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 833# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -879,6 +879,7 @@ CONFIG_I2C_HELPER_AUTO=y
879CONFIG_I2C_MPC=y 879CONFIG_I2C_MPC=y
880# CONFIG_I2C_OCORES is not set 880# CONFIG_I2C_OCORES is not set
881# CONFIG_I2C_SIMTEC is not set 881# CONFIG_I2C_SIMTEC is not set
882# CONFIG_I2C_XILINX is not set
882 883
883# 884#
884# External I2C/SMBus adapter drivers 885# External I2C/SMBus adapter drivers
@@ -924,18 +925,21 @@ CONFIG_SSB_POSSIBLE=y
924# Multifunction device drivers 925# Multifunction device drivers
925# 926#
926# CONFIG_MFD_CORE is not set 927# CONFIG_MFD_CORE is not set
928# CONFIG_MFD_88PM860X is not set
927# CONFIG_MFD_SM501 is not set 929# CONFIG_MFD_SM501 is not set
928# CONFIG_HTC_PASIC3 is not set 930# CONFIG_HTC_PASIC3 is not set
929# CONFIG_TWL4030_CORE is not set 931# CONFIG_TWL4030_CORE is not set
930# CONFIG_MFD_TMIO is not set 932# CONFIG_MFD_TMIO is not set
931# CONFIG_PMIC_DA903X is not set 933# CONFIG_PMIC_DA903X is not set
932# CONFIG_PMIC_ADP5520 is not set 934# CONFIG_PMIC_ADP5520 is not set
935# CONFIG_MFD_MAX8925 is not set
933# CONFIG_MFD_WM8400 is not set 936# CONFIG_MFD_WM8400 is not set
934# CONFIG_MFD_WM831X is not set 937# CONFIG_MFD_WM831X is not set
935# CONFIG_MFD_WM8350_I2C is not set 938# CONFIG_MFD_WM8350_I2C is not set
939# CONFIG_MFD_WM8994 is not set
936# CONFIG_MFD_PCF50633 is not set 940# CONFIG_MFD_PCF50633 is not set
937# CONFIG_AB3100_CORE is not set 941# CONFIG_AB3100_CORE is not set
938# CONFIG_MFD_88PM8607 is not set 942# CONFIG_LPC_SCH is not set
939# CONFIG_REGULATOR is not set 943# CONFIG_REGULATOR is not set
940# CONFIG_MEDIA_SUPPORT is not set 944# CONFIG_MEDIA_SUPPORT is not set
941 945
@@ -944,6 +948,7 @@ CONFIG_SSB_POSSIBLE=y
944# 948#
945# CONFIG_AGP is not set 949# CONFIG_AGP is not set
946CONFIG_VGA_ARB=y 950CONFIG_VGA_ARB=y
951CONFIG_VGA_ARB_MAX_GPUS=16
947# CONFIG_DRM is not set 952# CONFIG_DRM is not set
948# CONFIG_VGASTATE is not set 953# CONFIG_VGASTATE is not set
949CONFIG_VIDEO_OUTPUT_CONTROL=m 954CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1062,6 +1067,7 @@ CONFIG_MISC_FILESYSTEMS=y
1062# CONFIG_BEFS_FS is not set 1067# CONFIG_BEFS_FS is not set
1063# CONFIG_BFS_FS is not set 1068# CONFIG_BFS_FS is not set
1064# CONFIG_EFS_FS is not set 1069# CONFIG_EFS_FS is not set
1070# CONFIG_LOGFS is not set
1065# CONFIG_CRAMFS is not set 1071# CONFIG_CRAMFS is not set
1066# CONFIG_SQUASHFS is not set 1072# CONFIG_SQUASHFS is not set
1067# CONFIG_VXFS_FS is not set 1073# CONFIG_VXFS_FS is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index eaae2d469aa0..dffc8cac825f 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 14:45:08 2009 4# Wed Mar 10 14:34:23 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32
94# CONFIG_TREE_RCU_TRACE is not set 94# CONFIG_TREE_RCU_TRACE is not set
95# CONFIG_IKCONFIG is not set 95# CONFIG_IKCONFIG is not set
96CONFIG_LOG_BUF_SHIFT=14 96CONFIG_LOG_BUF_SHIFT=14
97CONFIG_GROUP_SCHED=y
98CONFIG_FAIR_GROUP_SCHED=y
99# CONFIG_RT_GROUP_SCHED is not set
100CONFIG_USER_SCHED=y
101# CONFIG_CGROUP_SCHED is not set
102# CONFIG_CGROUPS is not set 97# CONFIG_CGROUPS is not set
103CONFIG_SYSFS_DEPRECATED=y 98CONFIG_SYSFS_DEPRECATED=y
104CONFIG_SYSFS_DEPRECATED_V2=y 99CONFIG_SYSFS_DEPRECATED_V2=y
@@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE=""
109CONFIG_RD_GZIP=y 104CONFIG_RD_GZIP=y
110# CONFIG_RD_BZIP2 is not set 105# CONFIG_RD_BZIP2 is not set
111# CONFIG_RD_LZMA is not set 106# CONFIG_RD_LZMA is not set
107# CONFIG_RD_LZO is not set
112# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 108# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
113CONFIG_SYSCTL=y 109CONFIG_SYSCTL=y
114CONFIG_ANON_INODES=y 110CONFIG_ANON_INODES=y
@@ -341,7 +337,6 @@ CONFIG_NET=y
341# Networking options 337# Networking options
342# 338#
343CONFIG_PACKET=y 339CONFIG_PACKET=y
344# CONFIG_PACKET_MMAP is not set
345CONFIG_UNIX=y 340CONFIG_UNIX=y
346CONFIG_XFRM=y 341CONFIG_XFRM=y
347CONFIG_XFRM_USER=y 342CONFIG_XFRM_USER=y
@@ -518,6 +513,8 @@ CONFIG_MTD_ROM=y
518# UBI - Unsorted block images 513# UBI - Unsorted block images
519# 514#
520# CONFIG_MTD_UBI is not set 515# CONFIG_MTD_UBI is not set
516CONFIG_OF_FLATTREE=y
517CONFIG_OF_DYNAMIC=y
521CONFIG_OF_DEVICE=y 518CONFIG_OF_DEVICE=y
522CONFIG_OF_I2C=y 519CONFIG_OF_I2C=y
523CONFIG_OF_MDIO=y 520CONFIG_OF_MDIO=y
@@ -699,6 +696,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
699CONFIG_SERIAL_MPC52xx=y 696CONFIG_SERIAL_MPC52xx=y
700CONFIG_SERIAL_MPC52xx_CONSOLE=y 697CONFIG_SERIAL_MPC52xx_CONSOLE=y
701CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 698CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
699# CONFIG_SERIAL_TIMBERDALE is not set
702# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 700# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
703CONFIG_UNIX98_PTYS=y 701CONFIG_UNIX98_PTYS=y
704# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 702# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -728,6 +726,7 @@ CONFIG_I2C_HELPER_AUTO=y
728CONFIG_I2C_MPC=y 726CONFIG_I2C_MPC=y
729# CONFIG_I2C_OCORES is not set 727# CONFIG_I2C_OCORES is not set
730# CONFIG_I2C_SIMTEC is not set 728# CONFIG_I2C_SIMTEC is not set
729# CONFIG_I2C_XILINX is not set
731 730
732# 731#
733# External I2C/SMBus adapter drivers 732# External I2C/SMBus adapter drivers
@@ -773,10 +772,11 @@ CONFIG_HWMON=y
773# CONFIG_SENSORS_ADM1029 is not set 772# CONFIG_SENSORS_ADM1029 is not set
774# CONFIG_SENSORS_ADM1031 is not set 773# CONFIG_SENSORS_ADM1031 is not set
775# CONFIG_SENSORS_ADM9240 is not set 774# CONFIG_SENSORS_ADM9240 is not set
775# CONFIG_SENSORS_ADT7411 is not set
776# CONFIG_SENSORS_ADT7462 is not set 776# CONFIG_SENSORS_ADT7462 is not set
777# CONFIG_SENSORS_ADT7470 is not set 777# CONFIG_SENSORS_ADT7470 is not set
778# CONFIG_SENSORS_ADT7473 is not set
779# CONFIG_SENSORS_ADT7475 is not set 778# CONFIG_SENSORS_ADT7475 is not set
779# CONFIG_SENSORS_ASC7621 is not set
780# CONFIG_SENSORS_ATXP1 is not set 780# CONFIG_SENSORS_ATXP1 is not set
781# CONFIG_SENSORS_DS1621 is not set 781# CONFIG_SENSORS_DS1621 is not set
782# CONFIG_SENSORS_F71805F is not set 782# CONFIG_SENSORS_F71805F is not set
@@ -811,6 +811,7 @@ CONFIG_HWMON=y
811# CONFIG_SENSORS_SMSC47M192 is not set 811# CONFIG_SENSORS_SMSC47M192 is not set
812# CONFIG_SENSORS_SMSC47B397 is not set 812# CONFIG_SENSORS_SMSC47B397 is not set
813# CONFIG_SENSORS_ADS7828 is not set 813# CONFIG_SENSORS_ADS7828 is not set
814# CONFIG_SENSORS_AMC6821 is not set
814# CONFIG_SENSORS_THMC50 is not set 815# CONFIG_SENSORS_THMC50 is not set
815# CONFIG_SENSORS_TMP401 is not set 816# CONFIG_SENSORS_TMP401 is not set
816# CONFIG_SENSORS_TMP421 is not set 817# CONFIG_SENSORS_TMP421 is not set
@@ -831,6 +832,7 @@ CONFIG_WATCHDOG=y
831# Watchdog Device Drivers 832# Watchdog Device Drivers
832# 833#
833# CONFIG_SOFT_WATCHDOG is not set 834# CONFIG_SOFT_WATCHDOG is not set
835# CONFIG_MAX63XX_WATCHDOG is not set
834# CONFIG_MPC5200_WDT is not set 836# CONFIG_MPC5200_WDT is not set
835CONFIG_SSB_POSSIBLE=y 837CONFIG_SSB_POSSIBLE=y
836 838
@@ -843,18 +845,20 @@ CONFIG_SSB_POSSIBLE=y
843# Multifunction device drivers 845# Multifunction device drivers
844# 846#
845# CONFIG_MFD_CORE is not set 847# CONFIG_MFD_CORE is not set
848# CONFIG_MFD_88PM860X is not set
846# CONFIG_MFD_SM501 is not set 849# CONFIG_MFD_SM501 is not set
847# CONFIG_HTC_PASIC3 is not set 850# CONFIG_HTC_PASIC3 is not set
848# CONFIG_TWL4030_CORE is not set 851# CONFIG_TWL4030_CORE is not set
849# CONFIG_MFD_TMIO is not set 852# CONFIG_MFD_TMIO is not set
850# CONFIG_PMIC_DA903X is not set 853# CONFIG_PMIC_DA903X is not set
851# CONFIG_PMIC_ADP5520 is not set 854# CONFIG_PMIC_ADP5520 is not set
855# CONFIG_MFD_MAX8925 is not set
852# CONFIG_MFD_WM8400 is not set 856# CONFIG_MFD_WM8400 is not set
853# CONFIG_MFD_WM831X is not set 857# CONFIG_MFD_WM831X is not set
854# CONFIG_MFD_WM8350_I2C is not set 858# CONFIG_MFD_WM8350_I2C is not set
859# CONFIG_MFD_WM8994 is not set
855# CONFIG_MFD_PCF50633 is not set 860# CONFIG_MFD_PCF50633 is not set
856# CONFIG_AB3100_CORE is not set 861# CONFIG_AB3100_CORE is not set
857# CONFIG_MFD_88PM8607 is not set
858# CONFIG_REGULATOR is not set 862# CONFIG_REGULATOR is not set
859# CONFIG_MEDIA_SUPPORT is not set 863# CONFIG_MEDIA_SUPPORT is not set
860 864
@@ -1050,6 +1054,7 @@ CONFIG_JFFS2_ZLIB=y
1050# CONFIG_JFFS2_LZO is not set 1054# CONFIG_JFFS2_LZO is not set
1051CONFIG_JFFS2_RTIME=y 1055CONFIG_JFFS2_RTIME=y
1052# CONFIG_JFFS2_RUBIN is not set 1056# CONFIG_JFFS2_RUBIN is not set
1057# CONFIG_LOGFS is not set
1053CONFIG_CRAMFS=y 1058CONFIG_CRAMFS=y
1054# CONFIG_SQUASHFS is not set 1059# CONFIG_SQUASHFS is not set
1055# CONFIG_VXFS_FS is not set 1060# CONFIG_VXFS_FS is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 1742c0200b75..3cb2a522046a 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 14:45:10 2009 4# Wed Mar 10 14:34:25 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -97,11 +97,6 @@ CONFIG_RCU_FANOUT=32
97CONFIG_IKCONFIG=y 97CONFIG_IKCONFIG=y
98CONFIG_IKCONFIG_PROC=y 98CONFIG_IKCONFIG_PROC=y
99CONFIG_LOG_BUF_SHIFT=14 99CONFIG_LOG_BUF_SHIFT=14
100CONFIG_GROUP_SCHED=y
101CONFIG_FAIR_GROUP_SCHED=y
102# CONFIG_RT_GROUP_SCHED is not set
103CONFIG_USER_SCHED=y
104# CONFIG_CGROUP_SCHED is not set
105# CONFIG_CGROUPS is not set 100# CONFIG_CGROUPS is not set
106CONFIG_SYSFS_DEPRECATED=y 101CONFIG_SYSFS_DEPRECATED=y
107CONFIG_SYSFS_DEPRECATED_V2=y 102CONFIG_SYSFS_DEPRECATED_V2=y
@@ -326,7 +321,6 @@ CONFIG_PCI_SYSCALL=y
326# CONFIG_PCIEPORTBUS is not set 321# CONFIG_PCIEPORTBUS is not set
327CONFIG_ARCH_SUPPORTS_MSI=y 322CONFIG_ARCH_SUPPORTS_MSI=y
328# CONFIG_PCI_MSI is not set 323# CONFIG_PCI_MSI is not set
329CONFIG_PCI_LEGACY=y
330# CONFIG_PCI_STUB is not set 324# CONFIG_PCI_STUB is not set
331# CONFIG_PCI_IOV is not set 325# CONFIG_PCI_IOV is not set
332# CONFIG_PCCARD is not set 326# CONFIG_PCCARD is not set
@@ -352,7 +346,6 @@ CONFIG_NET=y
352# Networking options 346# Networking options
353# 347#
354CONFIG_PACKET=y 348CONFIG_PACKET=y
355# CONFIG_PACKET_MMAP is not set
356CONFIG_UNIX=y 349CONFIG_UNIX=y
357# CONFIG_NET_KEY is not set 350# CONFIG_NET_KEY is not set
358CONFIG_INET=y 351CONFIG_INET=y
@@ -525,6 +518,8 @@ CONFIG_MTD_PHYSMAP=y
525# UBI - Unsorted block images 518# UBI - Unsorted block images
526# 519#
527# CONFIG_MTD_UBI is not set 520# CONFIG_MTD_UBI is not set
521CONFIG_OF_FLATTREE=y
522CONFIG_OF_DYNAMIC=y
528CONFIG_OF_DEVICE=y 523CONFIG_OF_DEVICE=y
529CONFIG_OF_I2C=y 524CONFIG_OF_I2C=y
530CONFIG_OF_MDIO=y 525CONFIG_OF_MDIO=y
@@ -610,6 +605,7 @@ CONFIG_ATA_SFF=y
610# CONFIG_PATA_IT821X is not set 605# CONFIG_PATA_IT821X is not set
611# CONFIG_PATA_IT8213 is not set 606# CONFIG_PATA_IT8213 is not set
612# CONFIG_PATA_JMICRON is not set 607# CONFIG_PATA_JMICRON is not set
608# CONFIG_PATA_LEGACY is not set
613# CONFIG_PATA_TRIFLEX is not set 609# CONFIG_PATA_TRIFLEX is not set
614# CONFIG_PATA_MARVELL is not set 610# CONFIG_PATA_MARVELL is not set
615CONFIG_PATA_MPC52xx=m 611CONFIG_PATA_MPC52xx=m
@@ -647,7 +643,7 @@ CONFIG_PATA_MPC52xx=m
647# 643#
648 644
649# 645#
650# See the help texts for more information. 646# The newer stack is recommended.
651# 647#
652# CONFIG_FIREWIRE is not set 648# CONFIG_FIREWIRE is not set
653# CONFIG_IEEE1394 is not set 649# CONFIG_IEEE1394 is not set
@@ -775,6 +771,7 @@ CONFIG_SERIAL_MPC52xx=y
775CONFIG_SERIAL_MPC52xx_CONSOLE=y 771CONFIG_SERIAL_MPC52xx_CONSOLE=y
776CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600 772CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
777# CONFIG_SERIAL_JSM is not set 773# CONFIG_SERIAL_JSM is not set
774# CONFIG_SERIAL_TIMBERDALE is not set
778# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 775# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
779CONFIG_UNIX98_PTYS=y 776CONFIG_UNIX98_PTYS=y
780# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 777# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -824,6 +821,7 @@ CONFIG_I2C_HELPER_AUTO=y
824CONFIG_I2C_MPC=y 821CONFIG_I2C_MPC=y
825# CONFIG_I2C_OCORES is not set 822# CONFIG_I2C_OCORES is not set
826# CONFIG_I2C_SIMTEC is not set 823# CONFIG_I2C_SIMTEC is not set
824# CONFIG_I2C_XILINX is not set
827 825
828# 826#
829# External I2C/SMBus adapter drivers 827# External I2C/SMBus adapter drivers
@@ -870,18 +868,21 @@ CONFIG_SSB_POSSIBLE=y
870# Multifunction device drivers 868# Multifunction device drivers
871# 869#
872# CONFIG_MFD_CORE is not set 870# CONFIG_MFD_CORE is not set
871# CONFIG_MFD_88PM860X is not set
873# CONFIG_MFD_SM501 is not set 872# CONFIG_MFD_SM501 is not set
874# CONFIG_HTC_PASIC3 is not set 873# CONFIG_HTC_PASIC3 is not set
875# CONFIG_TWL4030_CORE is not set 874# CONFIG_TWL4030_CORE is not set
876# CONFIG_MFD_TMIO is not set 875# CONFIG_MFD_TMIO is not set
877# CONFIG_PMIC_DA903X is not set 876# CONFIG_PMIC_DA903X is not set
878# CONFIG_PMIC_ADP5520 is not set 877# CONFIG_PMIC_ADP5520 is not set
878# CONFIG_MFD_MAX8925 is not set
879# CONFIG_MFD_WM8400 is not set 879# CONFIG_MFD_WM8400 is not set
880# CONFIG_MFD_WM831X is not set 880# CONFIG_MFD_WM831X is not set
881# CONFIG_MFD_WM8350_I2C is not set 881# CONFIG_MFD_WM8350_I2C is not set
882# CONFIG_MFD_WM8994 is not set
882# CONFIG_MFD_PCF50633 is not set 883# CONFIG_MFD_PCF50633 is not set
883# CONFIG_AB3100_CORE is not set 884# CONFIG_AB3100_CORE is not set
884# CONFIG_MFD_88PM8607 is not set 885# CONFIG_LPC_SCH is not set
885# CONFIG_REGULATOR is not set 886# CONFIG_REGULATOR is not set
886# CONFIG_MEDIA_SUPPORT is not set 887# CONFIG_MEDIA_SUPPORT is not set
887 888
@@ -890,6 +891,7 @@ CONFIG_SSB_POSSIBLE=y
890# 891#
891# CONFIG_AGP is not set 892# CONFIG_AGP is not set
892CONFIG_VGA_ARB=y 893CONFIG_VGA_ARB=y
894CONFIG_VGA_ARB_MAX_GPUS=16
893# CONFIG_DRM is not set 895# CONFIG_DRM is not set
894# CONFIG_VGASTATE is not set 896# CONFIG_VGASTATE is not set
895# CONFIG_VIDEO_OUTPUT_CONTROL is not set 897# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -997,7 +999,6 @@ CONFIG_USB_STORAGE=m
997# CONFIG_USB_RIO500 is not set 999# CONFIG_USB_RIO500 is not set
998# CONFIG_USB_LEGOTOWER is not set 1000# CONFIG_USB_LEGOTOWER is not set
999# CONFIG_USB_LCD is not set 1001# CONFIG_USB_LCD is not set
1000# CONFIG_USB_BERRY_CHARGE is not set
1001# CONFIG_USB_LED is not set 1002# CONFIG_USB_LED is not set
1002# CONFIG_USB_CYPRESS_CY7C63 is not set 1003# CONFIG_USB_CYPRESS_CY7C63 is not set
1003# CONFIG_USB_CYTHERM is not set 1004# CONFIG_USB_CYTHERM is not set
@@ -1009,7 +1010,6 @@ CONFIG_USB_STORAGE=m
1009# CONFIG_USB_IOWARRIOR is not set 1010# CONFIG_USB_IOWARRIOR is not set
1010# CONFIG_USB_TEST is not set 1011# CONFIG_USB_TEST is not set
1011# CONFIG_USB_ISIGHTFW is not set 1012# CONFIG_USB_ISIGHTFW is not set
1012# CONFIG_USB_VST is not set
1013# CONFIG_USB_GADGET is not set 1013# CONFIG_USB_GADGET is not set
1014 1014
1015# 1015#
@@ -1172,6 +1172,7 @@ CONFIG_JFFS2_ZLIB=y
1172# CONFIG_JFFS2_LZO is not set 1172# CONFIG_JFFS2_LZO is not set
1173CONFIG_JFFS2_RTIME=y 1173CONFIG_JFFS2_RTIME=y
1174# CONFIG_JFFS2_RUBIN is not set 1174# CONFIG_JFFS2_RUBIN is not set
1175# CONFIG_LOGFS is not set
1175# CONFIG_CRAMFS is not set 1176# CONFIG_CRAMFS is not set
1176# CONFIG_SQUASHFS is not set 1177# CONFIG_SQUASHFS is not set
1177# CONFIG_VXFS_FS is not set 1178# CONFIG_VXFS_FS is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 3972438db719..96181c62abfa 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 14:45:09 2009 4# Wed Mar 10 14:34:24 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32
94# CONFIG_TREE_RCU_TRACE is not set 94# CONFIG_TREE_RCU_TRACE is not set
95# CONFIG_IKCONFIG is not set 95# CONFIG_IKCONFIG is not set
96CONFIG_LOG_BUF_SHIFT=14 96CONFIG_LOG_BUF_SHIFT=14
97CONFIG_GROUP_SCHED=y
98CONFIG_FAIR_GROUP_SCHED=y
99# CONFIG_RT_GROUP_SCHED is not set
100CONFIG_USER_SCHED=y
101# CONFIG_CGROUP_SCHED is not set
102# CONFIG_CGROUPS is not set 97# CONFIG_CGROUPS is not set
103CONFIG_SYSFS_DEPRECATED=y 98CONFIG_SYSFS_DEPRECATED=y
104CONFIG_SYSFS_DEPRECATED_V2=y 99CONFIG_SYSFS_DEPRECATED_V2=y
@@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE=""
109CONFIG_RD_GZIP=y 104CONFIG_RD_GZIP=y
110# CONFIG_RD_BZIP2 is not set 105# CONFIG_RD_BZIP2 is not set
111# CONFIG_RD_LZMA is not set 106# CONFIG_RD_LZMA is not set
107# CONFIG_RD_LZO is not set
112# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 108# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
113CONFIG_SYSCTL=y 109CONFIG_SYSCTL=y
114CONFIG_ANON_INODES=y 110CONFIG_ANON_INODES=y
@@ -346,7 +342,6 @@ CONFIG_NET=y
346# Networking options 342# Networking options
347# 343#
348CONFIG_PACKET=y 344CONFIG_PACKET=y
349# CONFIG_PACKET_MMAP is not set
350CONFIG_UNIX=y 345CONFIG_UNIX=y
351CONFIG_XFRM=y 346CONFIG_XFRM=y
352CONFIG_XFRM_USER=y 347CONFIG_XFRM_USER=y
@@ -524,6 +519,8 @@ CONFIG_MTD_PHYSMAP_OF=y
524# UBI - Unsorted block images 519# UBI - Unsorted block images
525# 520#
526# CONFIG_MTD_UBI is not set 521# CONFIG_MTD_UBI is not set
522CONFIG_OF_FLATTREE=y
523CONFIG_OF_DYNAMIC=y
527CONFIG_OF_DEVICE=y 524CONFIG_OF_DEVICE=y
528CONFIG_OF_I2C=y 525CONFIG_OF_I2C=y
529CONFIG_OF_MDIO=y 526CONFIG_OF_MDIO=y
@@ -704,6 +701,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
704CONFIG_SERIAL_MPC52xx=y 701CONFIG_SERIAL_MPC52xx=y
705CONFIG_SERIAL_MPC52xx_CONSOLE=y 702CONFIG_SERIAL_MPC52xx_CONSOLE=y
706CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 703CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
704# CONFIG_SERIAL_TIMBERDALE is not set
707# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 705# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
708CONFIG_UNIX98_PTYS=y 706CONFIG_UNIX98_PTYS=y
709# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 707# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -733,6 +731,7 @@ CONFIG_I2C_HELPER_AUTO=y
733CONFIG_I2C_MPC=y 731CONFIG_I2C_MPC=y
734# CONFIG_I2C_OCORES is not set 732# CONFIG_I2C_OCORES is not set
735# CONFIG_I2C_SIMTEC is not set 733# CONFIG_I2C_SIMTEC is not set
734# CONFIG_I2C_XILINX is not set
736 735
737# 736#
738# External I2C/SMBus adapter drivers 737# External I2C/SMBus adapter drivers
@@ -780,10 +779,11 @@ CONFIG_HWMON=y
780# CONFIG_SENSORS_ADM1029 is not set 779# CONFIG_SENSORS_ADM1029 is not set
781# CONFIG_SENSORS_ADM1031 is not set 780# CONFIG_SENSORS_ADM1031 is not set
782# CONFIG_SENSORS_ADM9240 is not set 781# CONFIG_SENSORS_ADM9240 is not set
782# CONFIG_SENSORS_ADT7411 is not set
783# CONFIG_SENSORS_ADT7462 is not set 783# CONFIG_SENSORS_ADT7462 is not set
784# CONFIG_SENSORS_ADT7470 is not set 784# CONFIG_SENSORS_ADT7470 is not set
785# CONFIG_SENSORS_ADT7473 is not set
786# CONFIG_SENSORS_ADT7475 is not set 785# CONFIG_SENSORS_ADT7475 is not set
786# CONFIG_SENSORS_ASC7621 is not set
787# CONFIG_SENSORS_ATXP1 is not set 787# CONFIG_SENSORS_ATXP1 is not set
788# CONFIG_SENSORS_DS1621 is not set 788# CONFIG_SENSORS_DS1621 is not set
789# CONFIG_SENSORS_F71805F is not set 789# CONFIG_SENSORS_F71805F is not set
@@ -818,6 +818,7 @@ CONFIG_HWMON=y
818# CONFIG_SENSORS_SMSC47M192 is not set 818# CONFIG_SENSORS_SMSC47M192 is not set
819# CONFIG_SENSORS_SMSC47B397 is not set 819# CONFIG_SENSORS_SMSC47B397 is not set
820# CONFIG_SENSORS_ADS7828 is not set 820# CONFIG_SENSORS_ADS7828 is not set
821# CONFIG_SENSORS_AMC6821 is not set
821# CONFIG_SENSORS_THMC50 is not set 822# CONFIG_SENSORS_THMC50 is not set
822# CONFIG_SENSORS_TMP401 is not set 823# CONFIG_SENSORS_TMP401 is not set
823# CONFIG_SENSORS_TMP421 is not set 824# CONFIG_SENSORS_TMP421 is not set
@@ -838,6 +839,7 @@ CONFIG_WATCHDOG=y
838# Watchdog Device Drivers 839# Watchdog Device Drivers
839# 840#
840# CONFIG_SOFT_WATCHDOG is not set 841# CONFIG_SOFT_WATCHDOG is not set
842# CONFIG_MAX63XX_WATCHDOG is not set
841# CONFIG_MPC5200_WDT is not set 843# CONFIG_MPC5200_WDT is not set
842 844
843# 845#
@@ -855,18 +857,20 @@ CONFIG_SSB_POSSIBLE=y
855# Multifunction device drivers 857# Multifunction device drivers
856# 858#
857# CONFIG_MFD_CORE is not set 859# CONFIG_MFD_CORE is not set
860# CONFIG_MFD_88PM860X is not set
858# CONFIG_MFD_SM501 is not set 861# CONFIG_MFD_SM501 is not set
859# CONFIG_HTC_PASIC3 is not set 862# CONFIG_HTC_PASIC3 is not set
860# CONFIG_TWL4030_CORE is not set 863# CONFIG_TWL4030_CORE is not set
861# CONFIG_MFD_TMIO is not set 864# CONFIG_MFD_TMIO is not set
862# CONFIG_PMIC_DA903X is not set 865# CONFIG_PMIC_DA903X is not set
863# CONFIG_PMIC_ADP5520 is not set 866# CONFIG_PMIC_ADP5520 is not set
867# CONFIG_MFD_MAX8925 is not set
864# CONFIG_MFD_WM8400 is not set 868# CONFIG_MFD_WM8400 is not set
865# CONFIG_MFD_WM831X is not set 869# CONFIG_MFD_WM831X is not set
866# CONFIG_MFD_WM8350_I2C is not set 870# CONFIG_MFD_WM8350_I2C is not set
871# CONFIG_MFD_WM8994 is not set
867# CONFIG_MFD_PCF50633 is not set 872# CONFIG_MFD_PCF50633 is not set
868# CONFIG_AB3100_CORE is not set 873# CONFIG_AB3100_CORE is not set
869# CONFIG_MFD_88PM8607 is not set
870# CONFIG_REGULATOR is not set 874# CONFIG_REGULATOR is not set
871# CONFIG_MEDIA_SUPPORT is not set 875# CONFIG_MEDIA_SUPPORT is not set
872 876
@@ -897,7 +901,6 @@ CONFIG_USB=y
897CONFIG_USB_DEVICEFS=y 901CONFIG_USB_DEVICEFS=y
898# CONFIG_USB_DEVICE_CLASS is not set 902# CONFIG_USB_DEVICE_CLASS is not set
899# CONFIG_USB_DYNAMIC_MINORS is not set 903# CONFIG_USB_DYNAMIC_MINORS is not set
900# CONFIG_USB_SUSPEND is not set
901# CONFIG_USB_OTG is not set 904# CONFIG_USB_OTG is not set
902# CONFIG_USB_OTG_WHITELIST is not set 905# CONFIG_USB_OTG_WHITELIST is not set
903# CONFIG_USB_OTG_BLACKLIST_HUB is not set 906# CONFIG_USB_OTG_BLACKLIST_HUB is not set
@@ -975,7 +978,6 @@ CONFIG_USB_STORAGE=y
975# CONFIG_USB_RIO500 is not set 978# CONFIG_USB_RIO500 is not set
976# CONFIG_USB_LEGOTOWER is not set 979# CONFIG_USB_LEGOTOWER is not set
977# CONFIG_USB_LCD is not set 980# CONFIG_USB_LCD is not set
978# CONFIG_USB_BERRY_CHARGE is not set
979# CONFIG_USB_LED is not set 981# CONFIG_USB_LED is not set
980# CONFIG_USB_CYPRESS_CY7C63 is not set 982# CONFIG_USB_CYPRESS_CY7C63 is not set
981# CONFIG_USB_CYTHERM is not set 983# CONFIG_USB_CYTHERM is not set
@@ -987,7 +989,6 @@ CONFIG_USB_STORAGE=y
987# CONFIG_USB_IOWARRIOR is not set 989# CONFIG_USB_IOWARRIOR is not set
988# CONFIG_USB_TEST is not set 990# CONFIG_USB_TEST is not set
989# CONFIG_USB_ISIGHTFW is not set 991# CONFIG_USB_ISIGHTFW is not set
990# CONFIG_USB_VST is not set
991# CONFIG_USB_GADGET is not set 992# CONFIG_USB_GADGET is not set
992 993
993# 994#
@@ -1151,6 +1152,7 @@ CONFIG_JFFS2_ZLIB=y
1151# CONFIG_JFFS2_LZO is not set 1152# CONFIG_JFFS2_LZO is not set
1152CONFIG_JFFS2_RTIME=y 1153CONFIG_JFFS2_RTIME=y
1153# CONFIG_JFFS2_RUBIN is not set 1154# CONFIG_JFFS2_RUBIN is not set
1155# CONFIG_LOGFS is not set
1154CONFIG_CRAMFS=y 1156CONFIG_CRAMFS=y
1155# CONFIG_SQUASHFS is not set 1157# CONFIG_SQUASHFS is not set
1156# CONFIG_VXFS_FS is not set 1158# CONFIG_VXFS_FS is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 61cf73d0000f..7012ac0134f0 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc1
4# Wed Dec 30 15:08:52 2009 4# Wed Mar 10 14:38:54 2010
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -96,30 +96,37 @@ CONFIG_RCU_FANOUT=32
96# CONFIG_TREE_RCU_TRACE is not set 96# CONFIG_TREE_RCU_TRACE is not set
97# CONFIG_IKCONFIG is not set 97# CONFIG_IKCONFIG is not set
98CONFIG_LOG_BUF_SHIFT=14 98CONFIG_LOG_BUF_SHIFT=14
99# CONFIG_GROUP_SCHED is not set
100# CONFIG_CGROUPS is not set 99# CONFIG_CGROUPS is not set
101CONFIG_SYSFS_DEPRECATED=y 100CONFIG_SYSFS_DEPRECATED=y
102CONFIG_SYSFS_DEPRECATED_V2=y 101CONFIG_SYSFS_DEPRECATED_V2=y
103# CONFIG_RELAY is not set 102# CONFIG_RELAY is not set
104# CONFIG_NAMESPACES is not set 103CONFIG_NAMESPACES=y
104# CONFIG_UTS_NS is not set
105# CONFIG_IPC_NS is not set
106# CONFIG_USER_NS is not set
107# CONFIG_PID_NS is not set
108# CONFIG_NET_NS is not set
105CONFIG_BLK_DEV_INITRD=y 109CONFIG_BLK_DEV_INITRD=y
106CONFIG_INITRAMFS_SOURCE="" 110CONFIG_INITRAMFS_SOURCE=""
107CONFIG_RD_GZIP=y 111CONFIG_RD_GZIP=y
108# CONFIG_RD_BZIP2 is not set 112CONFIG_RD_BZIP2=y
109# CONFIG_RD_LZMA is not set 113CONFIG_RD_LZMA=y
114CONFIG_RD_LZO=y
110# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 115# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
111CONFIG_SYSCTL=y 116CONFIG_SYSCTL=y
112CONFIG_ANON_INODES=y 117CONFIG_ANON_INODES=y
113CONFIG_EMBEDDED=y 118# CONFIG_EMBEDDED is not set
114# CONFIG_SYSCTL_SYSCALL is not set 119CONFIG_SYSCTL_SYSCALL=y
115# CONFIG_KALLSYMS is not set 120CONFIG_KALLSYMS=y
121# CONFIG_KALLSYMS_ALL is not set
122# CONFIG_KALLSYMS_EXTRA_PASS is not set
116CONFIG_HOTPLUG=y 123CONFIG_HOTPLUG=y
117CONFIG_PRINTK=y 124CONFIG_PRINTK=y
118CONFIG_BUG=y 125CONFIG_BUG=y
119CONFIG_ELF_CORE=y 126CONFIG_ELF_CORE=y
120CONFIG_BASE_FULL=y 127CONFIG_BASE_FULL=y
121CONFIG_FUTEX=y 128CONFIG_FUTEX=y
122# CONFIG_EPOLL is not set 129CONFIG_EPOLL=y
123CONFIG_SIGNALFD=y 130CONFIG_SIGNALFD=y
124CONFIG_TIMERFD=y 131CONFIG_TIMERFD=y
125CONFIG_EVENTFD=y 132CONFIG_EVENTFD=y
@@ -141,6 +148,7 @@ CONFIG_SLUB=y
141# CONFIG_SLOB is not set 148# CONFIG_SLOB is not set
142# CONFIG_PROFILING is not set 149# CONFIG_PROFILING is not set
143CONFIG_HAVE_OPROFILE=y 150CONFIG_HAVE_OPROFILE=y
151# CONFIG_KPROBES is not set
144CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y 152CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
145CONFIG_HAVE_IOREMAP_PROT=y 153CONFIG_HAVE_IOREMAP_PROT=y
146CONFIG_HAVE_KPROBES=y 154CONFIG_HAVE_KPROBES=y
@@ -320,6 +328,7 @@ CONFIG_SUSPEND=y
320CONFIG_SUSPEND_FREEZER=y 328CONFIG_SUSPEND_FREEZER=y
321# CONFIG_HIBERNATION is not set 329# CONFIG_HIBERNATION is not set
322# CONFIG_PM_RUNTIME is not set 330# CONFIG_PM_RUNTIME is not set
331CONFIG_PM_OPS=y
323CONFIG_SECCOMP=y 332CONFIG_SECCOMP=y
324CONFIG_ISA_DMA_API=y 333CONFIG_ISA_DMA_API=y
325 334
@@ -336,7 +345,6 @@ CONFIG_PCI_SYSCALL=y
336# CONFIG_PCIEPORTBUS is not set 345# CONFIG_PCIEPORTBUS is not set
337CONFIG_ARCH_SUPPORTS_MSI=y 346CONFIG_ARCH_SUPPORTS_MSI=y
338# CONFIG_PCI_MSI is not set 347# CONFIG_PCI_MSI is not set
339CONFIG_PCI_LEGACY=y
340# CONFIG_PCI_DEBUG is not set 348# CONFIG_PCI_DEBUG is not set
341# CONFIG_PCI_STUB is not set 349# CONFIG_PCI_STUB is not set
342# CONFIG_PCI_IOV is not set 350# CONFIG_PCI_IOV is not set
@@ -363,7 +371,6 @@ CONFIG_NET=y
363# Networking options 371# Networking options
364# 372#
365CONFIG_PACKET=y 373CONFIG_PACKET=y
366# CONFIG_PACKET_MMAP is not set
367CONFIG_UNIX=y 374CONFIG_UNIX=y
368CONFIG_XFRM=y 375CONFIG_XFRM=y
369CONFIG_XFRM_USER=m 376CONFIG_XFRM_USER=m
@@ -454,7 +461,9 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
454# CONFIG_DEVTMPFS is not set 461# CONFIG_DEVTMPFS is not set
455CONFIG_STANDALONE=y 462CONFIG_STANDALONE=y
456CONFIG_PREVENT_FIRMWARE_BUILD=y 463CONFIG_PREVENT_FIRMWARE_BUILD=y
457# CONFIG_FW_LOADER is not set 464CONFIG_FW_LOADER=y
465CONFIG_FIRMWARE_IN_KERNEL=y
466CONFIG_EXTRA_FIRMWARE=""
458# CONFIG_DEBUG_DRIVER is not set 467# CONFIG_DEBUG_DRIVER is not set
459# CONFIG_DEBUG_DEVRES is not set 468# CONFIG_DEBUG_DEVRES is not set
460# CONFIG_SYS_HYPERVISOR is not set 469# CONFIG_SYS_HYPERVISOR is not set
@@ -554,6 +563,8 @@ CONFIG_MTD_UBI_BEB_RESERVE=1
554# UBI debugging options 563# UBI debugging options
555# 564#
556# CONFIG_MTD_UBI_DEBUG is not set 565# CONFIG_MTD_UBI_DEBUG is not set
566CONFIG_OF_FLATTREE=y
567CONFIG_OF_DYNAMIC=y
557CONFIG_OF_DEVICE=y 568CONFIG_OF_DEVICE=y
558CONFIG_OF_GPIO=y 569CONFIG_OF_GPIO=y
559CONFIG_OF_I2C=y 570CONFIG_OF_I2C=y
@@ -732,6 +743,7 @@ CONFIG_ATA_SFF=y
732# CONFIG_PATA_IT821X is not set 743# CONFIG_PATA_IT821X is not set
733# CONFIG_PATA_IT8213 is not set 744# CONFIG_PATA_IT8213 is not set
734# CONFIG_PATA_JMICRON is not set 745# CONFIG_PATA_JMICRON is not set
746# CONFIG_PATA_LEGACY is not set
735# CONFIG_PATA_TRIFLEX is not set 747# CONFIG_PATA_TRIFLEX is not set
736# CONFIG_PATA_MARVELL is not set 748# CONFIG_PATA_MARVELL is not set
737CONFIG_PATA_MPC52xx=y 749CONFIG_PATA_MPC52xx=y
@@ -770,7 +782,7 @@ CONFIG_PATA_PLATFORM=y
770# 782#
771 783
772# 784#
773# See the help texts for more information. 785# The newer stack is recommended.
774# 786#
775# CONFIG_FIREWIRE is not set 787# CONFIG_FIREWIRE is not set
776# CONFIG_IEEE1394 is not set 788# CONFIG_IEEE1394 is not set
@@ -929,6 +941,7 @@ CONFIG_SERIAL_MPC52xx=y
929CONFIG_SERIAL_MPC52xx_CONSOLE=y 941CONFIG_SERIAL_MPC52xx_CONSOLE=y
930CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 942CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
931# CONFIG_SERIAL_JSM is not set 943# CONFIG_SERIAL_JSM is not set
944# CONFIG_SERIAL_TIMBERDALE is not set
932# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set 945# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
933CONFIG_UNIX98_PTYS=y 946CONFIG_UNIX98_PTYS=y
934# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 947# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -981,6 +994,7 @@ CONFIG_I2C_ALGOBIT=y
981CONFIG_I2C_MPC=y 994CONFIG_I2C_MPC=y
982# CONFIG_I2C_OCORES is not set 995# CONFIG_I2C_OCORES is not set
983# CONFIG_I2C_SIMTEC is not set 996# CONFIG_I2C_SIMTEC is not set
997# CONFIG_I2C_XILINX is not set
984 998
985# 999#
986# External I2C/SMBus adapter drivers 1000# External I2C/SMBus adapter drivers
@@ -1010,9 +1024,9 @@ CONFIG_SPI_MASTER=y
1010# 1024#
1011# SPI Master Controller Drivers 1025# SPI Master Controller Drivers
1012# 1026#
1013# CONFIG_SPI_BITBANG is not set 1027CONFIG_SPI_BITBANG=m
1014# CONFIG_SPI_GPIO is not set 1028CONFIG_SPI_GPIO=m
1015# CONFIG_SPI_MPC52xx is not set 1029CONFIG_SPI_MPC52xx=m
1016CONFIG_SPI_MPC52xx_PSC=m 1030CONFIG_SPI_MPC52xx_PSC=m
1017# CONFIG_SPI_XILINX is not set 1031# CONFIG_SPI_XILINX is not set
1018# CONFIG_SPI_DESIGNWARE is not set 1032# CONFIG_SPI_DESIGNWARE is not set
@@ -1036,14 +1050,18 @@ CONFIG_GPIOLIB=y
1036# 1050#
1037# Memory mapped GPIO expanders: 1051# Memory mapped GPIO expanders:
1038# 1052#
1053# CONFIG_GPIO_IT8761E is not set
1039# CONFIG_GPIO_XILINX is not set 1054# CONFIG_GPIO_XILINX is not set
1055# CONFIG_GPIO_SCH is not set
1040 1056
1041# 1057#
1042# I2C GPIO expanders: 1058# I2C GPIO expanders:
1043# 1059#
1060# CONFIG_GPIO_MAX7300 is not set
1044# CONFIG_GPIO_MAX732X is not set 1061# CONFIG_GPIO_MAX732X is not set
1045# CONFIG_GPIO_PCA953X is not set 1062# CONFIG_GPIO_PCA953X is not set
1046# CONFIG_GPIO_PCF857X is not set 1063# CONFIG_GPIO_PCF857X is not set
1064# CONFIG_GPIO_ADP5588 is not set
1047 1065
1048# 1066#
1049# PCI GPIO expanders: 1067# PCI GPIO expanders:
@@ -1080,10 +1098,11 @@ CONFIG_HWMON=y
1080# CONFIG_SENSORS_ADM1029 is not set 1098# CONFIG_SENSORS_ADM1029 is not set
1081# CONFIG_SENSORS_ADM1031 is not set 1099# CONFIG_SENSORS_ADM1031 is not set
1082# CONFIG_SENSORS_ADM9240 is not set 1100# CONFIG_SENSORS_ADM9240 is not set
1101# CONFIG_SENSORS_ADT7411 is not set
1083# CONFIG_SENSORS_ADT7462 is not set 1102# CONFIG_SENSORS_ADT7462 is not set
1084# CONFIG_SENSORS_ADT7470 is not set 1103# CONFIG_SENSORS_ADT7470 is not set
1085# CONFIG_SENSORS_ADT7473 is not set
1086# CONFIG_SENSORS_ADT7475 is not set 1104# CONFIG_SENSORS_ADT7475 is not set
1105# CONFIG_SENSORS_ASC7621 is not set
1087# CONFIG_SENSORS_ATXP1 is not set 1106# CONFIG_SENSORS_ATXP1 is not set
1088# CONFIG_SENSORS_DS1621 is not set 1107# CONFIG_SENSORS_DS1621 is not set
1089# CONFIG_SENSORS_I5K_AMB is not set 1108# CONFIG_SENSORS_I5K_AMB is not set
@@ -1123,6 +1142,7 @@ CONFIG_HWMON=y
1123# CONFIG_SENSORS_SMSC47M192 is not set 1142# CONFIG_SENSORS_SMSC47M192 is not set
1124# CONFIG_SENSORS_SMSC47B397 is not set 1143# CONFIG_SENSORS_SMSC47B397 is not set
1125# CONFIG_SENSORS_ADS7828 is not set 1144# CONFIG_SENSORS_ADS7828 is not set
1145# CONFIG_SENSORS_AMC6821 is not set
1126# CONFIG_SENSORS_THMC50 is not set 1146# CONFIG_SENSORS_THMC50 is not set
1127# CONFIG_SENSORS_TMP401 is not set 1147# CONFIG_SENSORS_TMP401 is not set
1128# CONFIG_SENSORS_TMP421 is not set 1148# CONFIG_SENSORS_TMP421 is not set
@@ -1147,6 +1167,7 @@ CONFIG_WATCHDOG=y
1147# Watchdog Device Drivers 1167# Watchdog Device Drivers
1148# 1168#
1149# CONFIG_SOFT_WATCHDOG is not set 1169# CONFIG_SOFT_WATCHDOG is not set
1170# CONFIG_MAX63XX_WATCHDOG is not set
1150# CONFIG_ALIM7101_WDT is not set 1171# CONFIG_ALIM7101_WDT is not set
1151# CONFIG_MPC5200_WDT is not set 1172# CONFIG_MPC5200_WDT is not set
1152# CONFIG_WATCHDOG_RTAS is not set 1173# CONFIG_WATCHDOG_RTAS is not set
@@ -1172,22 +1193,27 @@ CONFIG_SSB_POSSIBLE=y
1172# Multifunction device drivers 1193# Multifunction device drivers
1173# 1194#
1174# CONFIG_MFD_CORE is not set 1195# CONFIG_MFD_CORE is not set
1196# CONFIG_MFD_88PM860X is not set
1175# CONFIG_MFD_SM501 is not set 1197# CONFIG_MFD_SM501 is not set
1176# CONFIG_HTC_PASIC3 is not set 1198# CONFIG_HTC_PASIC3 is not set
1199# CONFIG_HTC_I2CPLD is not set
1177# CONFIG_TPS65010 is not set 1200# CONFIG_TPS65010 is not set
1178# CONFIG_TWL4030_CORE is not set 1201# CONFIG_TWL4030_CORE is not set
1179# CONFIG_MFD_TMIO is not set 1202# CONFIG_MFD_TMIO is not set
1180# CONFIG_PMIC_DA903X is not set 1203# CONFIG_PMIC_DA903X is not set
1181# CONFIG_PMIC_ADP5520 is not set 1204# CONFIG_PMIC_ADP5520 is not set
1205# CONFIG_MFD_MAX8925 is not set
1182# CONFIG_MFD_WM8400 is not set 1206# CONFIG_MFD_WM8400 is not set
1183# CONFIG_MFD_WM831X is not set 1207# CONFIG_MFD_WM831X is not set
1184# CONFIG_MFD_WM8350_I2C is not set 1208# CONFIG_MFD_WM8350_I2C is not set
1209# CONFIG_MFD_WM8994 is not set
1185# CONFIG_MFD_PCF50633 is not set 1210# CONFIG_MFD_PCF50633 is not set
1186# CONFIG_MFD_MC13783 is not set 1211# CONFIG_MFD_MC13783 is not set
1187# CONFIG_AB3100_CORE is not set 1212# CONFIG_AB3100_CORE is not set
1188# CONFIG_EZX_PCAP is not set 1213# CONFIG_EZX_PCAP is not set
1189# CONFIG_MFD_88PM8607 is not set
1190# CONFIG_AB4500_CORE is not set 1214# CONFIG_AB4500_CORE is not set
1215# CONFIG_MFD_TIMBERDALE is not set
1216# CONFIG_LPC_SCH is not set
1191# CONFIG_REGULATOR is not set 1217# CONFIG_REGULATOR is not set
1192# CONFIG_MEDIA_SUPPORT is not set 1218# CONFIG_MEDIA_SUPPORT is not set
1193 1219
@@ -1196,6 +1222,7 @@ CONFIG_SSB_POSSIBLE=y
1196# 1222#
1197# CONFIG_AGP is not set 1223# CONFIG_AGP is not set
1198CONFIG_VGA_ARB=y 1224CONFIG_VGA_ARB=y
1225CONFIG_VGA_ARB_MAX_GPUS=16
1199CONFIG_DRM=y 1226CONFIG_DRM=y
1200# CONFIG_DRM_TDFX is not set 1227# CONFIG_DRM_TDFX is not set
1201# CONFIG_DRM_R128 is not set 1228# CONFIG_DRM_R128 is not set
@@ -1309,32 +1336,46 @@ CONFIG_USB_HID=y
1309# 1336#
1310# Special HID drivers 1337# Special HID drivers
1311# 1338#
1339# CONFIG_HID_3M_PCT is not set
1312CONFIG_HID_A4TECH=y 1340CONFIG_HID_A4TECH=y
1313# CONFIG_HID_APPLE is not set 1341CONFIG_HID_APPLE=y
1314CONFIG_HID_BELKIN=y 1342CONFIG_HID_BELKIN=y
1315CONFIG_HID_CHERRY=y 1343CONFIG_HID_CHERRY=y
1316# CONFIG_HID_CHICONY is not set 1344CONFIG_HID_CHICONY=y
1317CONFIG_HID_CYPRESS=y 1345CONFIG_HID_CYPRESS=y
1318# CONFIG_HID_DRAGONRISE is not set 1346CONFIG_HID_DRAGONRISE=y
1347# CONFIG_DRAGONRISE_FF is not set
1319CONFIG_HID_EZKEY=y 1348CONFIG_HID_EZKEY=y
1320# CONFIG_HID_KYE is not set 1349CONFIG_HID_KYE=y
1321# CONFIG_HID_GYRATION is not set 1350CONFIG_HID_GYRATION=y
1322# CONFIG_HID_TWINHAN is not set 1351CONFIG_HID_TWINHAN=y
1323# CONFIG_HID_KENSINGTON is not set 1352CONFIG_HID_KENSINGTON=y
1324# CONFIG_HID_LOGITECH is not set 1353CONFIG_HID_LOGITECH=y
1325# CONFIG_HID_MICROSOFT is not set 1354# CONFIG_LOGITECH_FF is not set
1326# CONFIG_HID_MONTEREY is not set 1355# CONFIG_LOGIRUMBLEPAD2_FF is not set
1356# CONFIG_LOGIG940_FF is not set
1357CONFIG_HID_MICROSOFT=y
1358# CONFIG_HID_MOSART is not set
1359CONFIG_HID_MONTEREY=y
1327# CONFIG_HID_NTRIG is not set 1360# CONFIG_HID_NTRIG is not set
1328# CONFIG_HID_PANTHERLORD is not set 1361CONFIG_HID_ORTEK=y
1329# CONFIG_HID_PETALYNX is not set 1362CONFIG_HID_PANTHERLORD=y
1330# CONFIG_HID_SAMSUNG is not set 1363# CONFIG_PANTHERLORD_FF is not set
1331# CONFIG_HID_SONY is not set 1364CONFIG_HID_PETALYNX=y
1332# CONFIG_HID_SUNPLUS is not set 1365# CONFIG_HID_QUANTA is not set
1333# CONFIG_HID_GREENASIA is not set 1366CONFIG_HID_SAMSUNG=y
1334# CONFIG_HID_SMARTJOYPLUS is not set 1367CONFIG_HID_SONY=y
1335# CONFIG_HID_TOPSEED is not set 1368# CONFIG_HID_STANTUM is not set
1336# CONFIG_HID_THRUSTMASTER is not set 1369CONFIG_HID_SUNPLUS=y
1337# CONFIG_HID_ZEROPLUS is not set 1370CONFIG_HID_GREENASIA=y
1371# CONFIG_GREENASIA_FF is not set
1372CONFIG_HID_SMARTJOYPLUS=y
1373# CONFIG_SMARTJOYPLUS_FF is not set
1374CONFIG_HID_TOPSEED=y
1375CONFIG_HID_THRUSTMASTER=y
1376# CONFIG_THRUSTMASTER_FF is not set
1377CONFIG_HID_ZEROPLUS=y
1378# CONFIG_ZEROPLUS_FF is not set
1338CONFIG_USB_SUPPORT=y 1379CONFIG_USB_SUPPORT=y
1339CONFIG_USB_ARCH_HAS_HCD=y 1380CONFIG_USB_ARCH_HAS_HCD=y
1340CONFIG_USB_ARCH_HAS_OHCI=y 1381CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1349,10 +1390,7 @@ CONFIG_USB=y
1349CONFIG_USB_DEVICEFS=y 1390CONFIG_USB_DEVICEFS=y
1350# CONFIG_USB_DEVICE_CLASS is not set 1391# CONFIG_USB_DEVICE_CLASS is not set
1351# CONFIG_USB_DYNAMIC_MINORS is not set 1392# CONFIG_USB_DYNAMIC_MINORS is not set
1352# CONFIG_USB_SUSPEND is not set
1353# CONFIG_USB_OTG is not set 1393# CONFIG_USB_OTG is not set
1354# CONFIG_USB_OTG_WHITELIST is not set
1355# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1356CONFIG_USB_MON=y 1394CONFIG_USB_MON=y
1357# CONFIG_USB_WUSB is not set 1395# CONFIG_USB_WUSB is not set
1358# CONFIG_USB_WUSB_CBAF is not set 1396# CONFIG_USB_WUSB_CBAF is not set
@@ -1433,7 +1471,6 @@ CONFIG_USB_STORAGE=y
1433# CONFIG_USB_RIO500 is not set 1471# CONFIG_USB_RIO500 is not set
1434# CONFIG_USB_LEGOTOWER is not set 1472# CONFIG_USB_LEGOTOWER is not set
1435# CONFIG_USB_LCD is not set 1473# CONFIG_USB_LCD is not set
1436# CONFIG_USB_BERRY_CHARGE is not set
1437# CONFIG_USB_LED is not set 1474# CONFIG_USB_LED is not set
1438# CONFIG_USB_CYPRESS_CY7C63 is not set 1475# CONFIG_USB_CYPRESS_CY7C63 is not set
1439# CONFIG_USB_CYTHERM is not set 1476# CONFIG_USB_CYTHERM is not set
@@ -1445,7 +1482,6 @@ CONFIG_USB_STORAGE=y
1445# CONFIG_USB_IOWARRIOR is not set 1482# CONFIG_USB_IOWARRIOR is not set
1446# CONFIG_USB_TEST is not set 1483# CONFIG_USB_TEST is not set
1447# CONFIG_USB_ISIGHTFW is not set 1484# CONFIG_USB_ISIGHTFW is not set
1448# CONFIG_USB_VST is not set
1449# CONFIG_USB_GADGET is not set 1485# CONFIG_USB_GADGET is not set
1450 1486
1451# 1487#
@@ -1636,6 +1672,7 @@ CONFIG_UBIFS_FS=m
1636CONFIG_UBIFS_FS_LZO=y 1672CONFIG_UBIFS_FS_LZO=y
1637CONFIG_UBIFS_FS_ZLIB=y 1673CONFIG_UBIFS_FS_ZLIB=y
1638# CONFIG_UBIFS_FS_DEBUG is not set 1674# CONFIG_UBIFS_FS_DEBUG is not set
1675# CONFIG_LOGFS is not set
1639CONFIG_CRAMFS=y 1676CONFIG_CRAMFS=y
1640# CONFIG_SQUASHFS is not set 1677# CONFIG_SQUASHFS is not set
1641# CONFIG_VXFS_FS is not set 1678# CONFIG_VXFS_FS is not set
@@ -1730,8 +1767,11 @@ CONFIG_CRC32=y
1730CONFIG_ZLIB_INFLATE=y 1767CONFIG_ZLIB_INFLATE=y
1731CONFIG_ZLIB_DEFLATE=y 1768CONFIG_ZLIB_DEFLATE=y
1732CONFIG_LZO_COMPRESS=m 1769CONFIG_LZO_COMPRESS=m
1733CONFIG_LZO_DECOMPRESS=m 1770CONFIG_LZO_DECOMPRESS=y
1734CONFIG_DECOMPRESS_GZIP=y 1771CONFIG_DECOMPRESS_GZIP=y
1772CONFIG_DECOMPRESS_BZIP2=y
1773CONFIG_DECOMPRESS_LZMA=y
1774CONFIG_DECOMPRESS_LZO=y
1735CONFIG_HAS_IOMEM=y 1775CONFIG_HAS_IOMEM=y
1736CONFIG_HAS_IOPORT=y 1776CONFIG_HAS_IOPORT=y
1737CONFIG_HAS_DMA=y 1777CONFIG_HAS_DMA=y
@@ -1776,11 +1816,11 @@ CONFIG_SCHED_DEBUG=y
1776# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1816# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1777# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1817# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1778# CONFIG_DEBUG_KOBJECT is not set 1818# CONFIG_DEBUG_KOBJECT is not set
1779# CONFIG_DEBUG_BUGVERBOSE is not set 1819CONFIG_DEBUG_BUGVERBOSE=y
1780CONFIG_DEBUG_INFO=y 1820CONFIG_DEBUG_INFO=y
1781# CONFIG_DEBUG_VM is not set 1821# CONFIG_DEBUG_VM is not set
1782# CONFIG_DEBUG_WRITECOUNT is not set 1822# CONFIG_DEBUG_WRITECOUNT is not set
1783# CONFIG_DEBUG_MEMORY_INIT is not set 1823CONFIG_DEBUG_MEMORY_INIT=y
1784# CONFIG_DEBUG_LIST is not set 1824# CONFIG_DEBUG_LIST is not set
1785# CONFIG_DEBUG_SG is not set 1825# CONFIG_DEBUG_SG is not set
1786# CONFIG_DEBUG_NOTIFIERS is not set 1826# CONFIG_DEBUG_NOTIFIERS is not set
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index aea714797590..d553bbeb726c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -25,7 +25,7 @@
25#define PPC_INST_LDARX 0x7c0000a8 25#define PPC_INST_LDARX 0x7c0000a8
26#define PPC_INST_LSWI 0x7c0004aa 26#define PPC_INST_LSWI 0x7c0004aa
27#define PPC_INST_LSWX 0x7c00042a 27#define PPC_INST_LSWX 0x7c00042a
28#define PPC_INST_LWARX 0x7c000029 28#define PPC_INST_LWARX 0x7c000028
29#define PPC_INST_LWSYNC 0x7c2004ac 29#define PPC_INST_LWSYNC 0x7c2004ac
30#define PPC_INST_LXVD2X 0x7c000698 30#define PPC_INST_LXVD2X 0x7c000698
31#define PPC_INST_MCRXR 0x7c000400 31#define PPC_INST_MCRXR 0x7c000400
@@ -62,8 +62,8 @@
62#define __PPC_T_TLB(t) (((t) & 0x3) << 21) 62#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
63#define __PPC_WC(w) (((w) & 0x3) << 21) 63#define __PPC_WC(w) (((w) & 0x3) << 21)
64/* 64/*
65 * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have 65 * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
66 * any side effects on all 32bit processors, we can do this all the time. 66 * larx with EH set as an illegal instruction.
67 */ 67 */
68#ifdef CONFIG_PPC64 68#ifdef CONFIG_PPC64
69#define __PPC_EH(eh) (((eh) & 0x1) << 0) 69#define __PPC_EH(eh) (((eh) & 0x1) << 0)
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index efa7f0b879f3..23913e902fc3 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task,
30static inline long syscall_get_error(struct task_struct *task, 30static inline long syscall_get_error(struct task_struct *task,
31 struct pt_regs *regs) 31 struct pt_regs *regs)
32{ 32{
33 return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0; 33 return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
34} 34}
35 35
36static inline long syscall_get_return_value(struct task_struct *task, 36static inline long syscall_get_return_value(struct task_struct *task,
@@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
44 int error, long val) 44 int error, long val)
45{ 45{
46 if (error) { 46 if (error) {
47 regs->ccr |= 0x1000L; 47 regs->ccr |= 0x10000000L;
48 regs->gpr[3] = -error; 48 regs->gpr[3] = -error;
49 } else { 49 } else {
50 regs->ccr &= ~0x1000L; 50 regs->ccr &= ~0x10000000L;
51 regs->gpr[3] = val; 51 regs->gpr[3] = val;
52 } 52 }
53} 53}
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 25793bb0e782..725526547994 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -747,9 +747,6 @@ finish_tlb_load:
747#else 747#else
748 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ 748 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
749#endif 749#endif
750#ifdef CONFIG_SMP
751 ori r12, r12, MAS2_M
752#endif
753 mtspr SPRN_MAS2, r12 750 mtspr SPRN_MAS2, r12
754 751
755#ifdef CONFIG_PTE_64BIT 752#ifdef CONFIG_PTE_64BIT
@@ -887,13 +884,17 @@ KernelSPE:
887 lwz r3,_MSR(r1) 884 lwz r3,_MSR(r1)
888 oris r3,r3,MSR_SPE@h 885 oris r3,r3,MSR_SPE@h
889 stw r3,_MSR(r1) /* enable use of SPE after return */ 886 stw r3,_MSR(r1) /* enable use of SPE after return */
887#ifdef CONFIG_PRINTK
890 lis r3,87f@h 888 lis r3,87f@h
891 ori r3,r3,87f@l 889 ori r3,r3,87f@l
892 mr r4,r2 /* current */ 890 mr r4,r2 /* current */
893 lwz r5,_NIP(r1) 891 lwz r5,_NIP(r1)
894 bl printk 892 bl printk
893#endif
895 b ret_from_except 894 b ret_from_except
895#ifdef CONFIG_PRINTK
89687: .string "SPE used in kernel (task=%p, pc=%x) \n" 89687: .string "SPE used in kernel (task=%p, pc=%x) \n"
897#endif
897 .align 4,0 898 .align 4,0
898 899
899#endif /* CONFIG_SPE */ 900#endif /* CONFIG_SPE */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5547ae6e6b0b..ec94f906ea43 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -42,12 +42,7 @@
42 42
43#define DBG(...) 43#define DBG(...)
44 44
45#ifdef CONFIG_IOMMU_VMERGE 45static int novmerge;
46static int novmerge = 0;
47#else
48static int novmerge = 1;
49#endif
50
51static int protect4gb = 1; 46static int protect4gb = 1;
52 47
53static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); 48static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5120bd44f69a..08460a2e9f41 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
1287 irq_exit(); 1287 irq_exit();
1288} 1288}
1289 1289
1290void hw_perf_event_setup(int cpu) 1290static void power_pmu_setup(int cpu)
1291{ 1291{
1292 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); 1292 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1293 1293
@@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu)
1297 cpuhw->mmcr[0] = MMCR0_FC; 1297 cpuhw->mmcr[0] = MMCR0_FC;
1298} 1298}
1299 1299
1300static int __cpuinit
1301power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1302{
1303 unsigned int cpu = (long)hcpu;
1304
1305 switch (action & ~CPU_TASKS_FROZEN) {
1306 case CPU_UP_PREPARE:
1307 power_pmu_setup(cpu);
1308 break;
1309
1310 default:
1311 break;
1312 }
1313
1314 return NOTIFY_OK;
1315}
1316
1300int register_power_pmu(struct power_pmu *pmu) 1317int register_power_pmu(struct power_pmu *pmu)
1301{ 1318{
1302 if (ppmu) 1319 if (ppmu)
@@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu)
1314 freeze_events_kernel = MMCR0_FCHV; 1331 freeze_events_kernel = MMCR0_FCHV;
1315#endif /* CONFIG_PPC64 */ 1332#endif /* CONFIG_PPC64 */
1316 1333
1334 perf_cpu_notifier(power_pmu_notifier);
1335
1317 return 0; 1336 return 0;
1318} 1337}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b152de3e64d4..8f58986c2ad9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,7 +39,6 @@
39#include <asm/serial.h> 39#include <asm/serial.h>
40#include <asm/udbg.h> 40#include <asm/udbg.h>
41#include <asm/mmu_context.h> 41#include <asm/mmu_context.h>
42#include <asm/swiotlb.h>
43 42
44#include "setup.h" 43#include "setup.h"
45 44
@@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p)
343 ppc_md.setup_arch(); 342 ppc_md.setup_arch();
344 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 343 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
345 344
346#ifdef CONFIG_SWIOTLB
347 if (ppc_swiotlb_enable)
348 swiotlb_init(1);
349#endif
350
351 paging_init(); 345 paging_init();
352 346
353 /* Initialize the MMU context management stuff */ 347 /* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 63547394048c..914389158a9b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,7 +61,6 @@
61#include <asm/xmon.h> 61#include <asm/xmon.h>
62#include <asm/udbg.h> 62#include <asm/udbg.h>
63#include <asm/kexec.h> 63#include <asm/kexec.h>
64#include <asm/swiotlb.h>
65#include <asm/mmu_context.h> 64#include <asm/mmu_context.h>
66 65
67#include "setup.h" 66#include "setup.h"
@@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p)
541 if (ppc_md.setup_arch) 540 if (ppc_md.setup_arch)
542 ppc_md.setup_arch(); 541 ppc_md.setup_arch();
543 542
544#ifdef CONFIG_SWIOTLB
545 if (ppc_swiotlb_enable)
546 swiotlb_init(1);
547#endif
548
549 paging_init(); 543 paging_init();
550 544
551 /* Initialize the MMU context management stuff */ 545 /* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 311224cdb7ad..448f972b22f5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -48,6 +48,7 @@
48#include <asm/sparsemem.h> 48#include <asm/sparsemem.h>
49#include <asm/vdso.h> 49#include <asm/vdso.h>
50#include <asm/fixmap.h> 50#include <asm/fixmap.h>
51#include <asm/swiotlb.h>
51 52
52#include "mmu_decl.h" 53#include "mmu_decl.h"
53 54
@@ -320,6 +321,11 @@ void __init mem_init(void)
320 struct page *page; 321 struct page *page;
321 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 322 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
322 323
324#ifdef CONFIG_SWIOTLB
325 if (ppc_swiotlb_enable)
326 swiotlb_init(1);
327#endif
328
323 num_physpages = lmb.memory.size >> PAGE_SHIFT; 329 num_physpages = lmb.memory.size >> PAGE_SHIFT;
324 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 330 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
325 331
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 072b948b2e2d..5d7cc88dae6b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -711,7 +711,11 @@ static int __devinit mpc52xx_gpt_wdt_init(void)
711 return 0; 711 return 0;
712} 712}
713 713
714#define mpc52xx_gpt_wdt_setup(x, y) (0) 714static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
715 const u32 *period)
716{
717 return 0;
718}
715 719
716#endif /* CONFIG_MPC5200_WDT */ 720#endif /* CONFIG_MPC5200_WDT */
717 721
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 19fe84550b49..56e4418c19b9 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -66,6 +66,13 @@ int pmb_unmap(void __iomem *addr);
66 66
67#else 67#else
68 68
69static inline int
70pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
71 unsigned long size, pgprot_t prot)
72{
73 return -EINVAL;
74}
75
69static inline void __iomem * 76static inline void __iomem *
70pmb_remap_caller(phys_addr_t phys, unsigned long size, 77pmb_remap_caller(phys_addr_t phys, unsigned long size,
71 pgprot_t prot, void *caller) 78 pgprot_t prot, void *caller)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7ff0943e7a08..9f253e9cce01 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
275 return &pmu; 275 return &pmu;
276} 276}
277 277
278void hw_perf_event_setup(int cpu) 278static void sh_pmu_setup(int cpu)
279{ 279{
280 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); 280 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
281 281
282 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 282 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
283} 283}
284 284
285static int __cpuinit
286sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
287{
288 unsigned int cpu = (long)hcpu;
289
290 switch (action & ~CPU_TASKS_FROZEN) {
291 case CPU_UP_PREPARE:
292 sh_pmu_setup(cpu);
293 break;
294
295 default:
296 break;
297 }
298
299 return NOTIFY_OK;
300}
301
285void hw_perf_enable(void) 302void hw_perf_enable(void)
286{ 303{
287 if (!sh_pmu_initialized()) 304 if (!sh_pmu_initialized())
@@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu)
308 325
309 WARN_ON(pmu->num_events > MAX_HWEVENTS); 326 WARN_ON(pmu->num_events > MAX_HWEVENTS);
310 327
328 perf_cpu_notifier(sh_pmu_notifier);
311 return 0; 329 return 0;
312} 330}
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index cf20a5c5136a..8a4eca551fc0 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -1,6 +1,8 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/module.h>
2#include <asm/sizes.h> 3#include <asm/sizes.h>
3#include <asm/page.h> 4#include <asm/page.h>
5#include <asm/addrspace.h>
4 6
5/* 7/*
6 * This is the offset of the uncached section from its cached alias. 8 * This is the offset of the uncached section from its cached alias.
@@ -15,15 +17,22 @@
15unsigned long cached_to_uncached = SZ_512M; 17unsigned long cached_to_uncached = SZ_512M;
16unsigned long uncached_size = SZ_512M; 18unsigned long uncached_size = SZ_512M;
17unsigned long uncached_start, uncached_end; 19unsigned long uncached_start, uncached_end;
20EXPORT_SYMBOL(uncached_start);
21EXPORT_SYMBOL(uncached_end);
18 22
19int virt_addr_uncached(unsigned long kaddr) 23int virt_addr_uncached(unsigned long kaddr)
20{ 24{
21 return (kaddr >= uncached_start) && (kaddr < uncached_end); 25 return (kaddr >= uncached_start) && (kaddr < uncached_end);
22} 26}
27EXPORT_SYMBOL(virt_addr_uncached);
23 28
24void __init uncached_init(void) 29void __init uncached_init(void)
25{ 30{
31#ifdef CONFIG_29BIT
32 uncached_start = P2SEG;
33#else
26 uncached_start = memory_end; 34 uncached_start = memory_end;
35#endif
27 uncached_end = uncached_start + uncached_size; 36 uncached_end = uncached_start + uncached_size;
28} 37}
29 38
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 42aafd11e170..60398a0d947c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -133,8 +133,8 @@ struct x86_pmu {
133 int (*handle_irq)(struct pt_regs *); 133 int (*handle_irq)(struct pt_regs *);
134 void (*disable_all)(void); 134 void (*disable_all)(void);
135 void (*enable_all)(void); 135 void (*enable_all)(void);
136 void (*enable)(struct hw_perf_event *, int); 136 void (*enable)(struct perf_event *);
137 void (*disable)(struct hw_perf_event *, int); 137 void (*disable)(struct perf_event *);
138 unsigned eventsel; 138 unsigned eventsel;
139 unsigned perfctr; 139 unsigned perfctr;
140 u64 (*event_map)(int); 140 u64 (*event_map)(int);
@@ -157,6 +157,11 @@ struct x86_pmu {
157 void (*put_event_constraints)(struct cpu_hw_events *cpuc, 157 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
158 struct perf_event *event); 158 struct perf_event *event);
159 struct event_constraint *event_constraints; 159 struct event_constraint *event_constraints;
160
161 void (*cpu_prepare)(int cpu);
162 void (*cpu_starting)(int cpu);
163 void (*cpu_dying)(int cpu);
164 void (*cpu_dead)(int cpu);
160}; 165};
161 166
162static struct x86_pmu x86_pmu __read_mostly; 167static struct x86_pmu x86_pmu __read_mostly;
@@ -165,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
165 .enabled = 1, 170 .enabled = 1,
166}; 171};
167 172
168static int x86_perf_event_set_period(struct perf_event *event, 173static int x86_perf_event_set_period(struct perf_event *event);
169 struct hw_perf_event *hwc, int idx);
170 174
171/* 175/*
172 * Generalized hw caching related hw_event table, filled 176 * Generalized hw caching related hw_event table, filled
@@ -189,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids
189 * Returns the delta events processed. 193 * Returns the delta events processed.
190 */ 194 */
191static u64 195static u64
192x86_perf_event_update(struct perf_event *event, 196x86_perf_event_update(struct perf_event *event)
193 struct hw_perf_event *hwc, int idx)
194{ 197{
198 struct hw_perf_event *hwc = &event->hw;
195 int shift = 64 - x86_pmu.event_bits; 199 int shift = 64 - x86_pmu.event_bits;
196 u64 prev_raw_count, new_raw_count; 200 u64 prev_raw_count, new_raw_count;
201 int idx = hwc->idx;
197 s64 delta; 202 s64 delta;
198 203
199 if (idx == X86_PMC_IDX_FIXED_BTS) 204 if (idx == X86_PMC_IDX_FIXED_BTS)
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
293 return x86_pmu.enable_bts != NULL; 298 return x86_pmu.enable_bts != NULL;
294} 299}
295 300
296static inline void init_debug_store_on_cpu(int cpu) 301static void init_debug_store_on_cpu(int cpu)
297{ 302{
298 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 303 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
299 304
@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
305 (u32)((u64)(unsigned long)ds >> 32)); 310 (u32)((u64)(unsigned long)ds >> 32));
306} 311}
307 312
308static inline void fini_debug_store_on_cpu(int cpu) 313static void fini_debug_store_on_cpu(int cpu)
309{ 314{
310 if (!per_cpu(cpu_hw_events, cpu).ds) 315 if (!per_cpu(cpu_hw_events, cpu).ds)
311 return; 316 return;
@@ -638,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
638 if (test_bit(hwc->idx, used_mask)) 643 if (test_bit(hwc->idx, used_mask))
639 break; 644 break;
640 645
641 set_bit(hwc->idx, used_mask); 646 __set_bit(hwc->idx, used_mask);
642 if (assign) 647 if (assign)
643 assign[i] = hwc->idx; 648 assign[i] = hwc->idx;
644 } 649 }
@@ -687,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
687 if (j == X86_PMC_IDX_MAX) 692 if (j == X86_PMC_IDX_MAX)
688 break; 693 break;
689 694
690 set_bit(j, used_mask); 695 __set_bit(j, used_mask);
691 696
692 if (assign) 697 if (assign)
693 assign[i] = j; 698 assign[i] = j;
@@ -780,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
780 hwc->last_tag == cpuc->tags[i]; 785 hwc->last_tag == cpuc->tags[i];
781} 786}
782 787
788static int x86_pmu_start(struct perf_event *event);
783static void x86_pmu_stop(struct perf_event *event); 789static void x86_pmu_stop(struct perf_event *event);
784 790
785void hw_perf_enable(void) 791void hw_perf_enable(void)
@@ -796,6 +802,7 @@ void hw_perf_enable(void)
796 return; 802 return;
797 803
798 if (cpuc->n_added) { 804 if (cpuc->n_added) {
805 int n_running = cpuc->n_events - cpuc->n_added;
799 /* 806 /*
800 * apply assignment obtained either from 807 * apply assignment obtained either from
801 * hw_perf_group_sched_in() or x86_pmu_enable() 808 * hw_perf_group_sched_in() or x86_pmu_enable()
@@ -803,8 +810,7 @@ void hw_perf_enable(void)
803 * step1: save events moving to new counters 810 * step1: save events moving to new counters
804 * step2: reprogram moved events into new counters 811 * step2: reprogram moved events into new counters
805 */ 812 */
806 for (i = 0; i < cpuc->n_events; i++) { 813 for (i = 0; i < n_running; i++) {
807
808 event = cpuc->event_list[i]; 814 event = cpuc->event_list[i];
809 hwc = &event->hw; 815 hwc = &event->hw;
810 816
@@ -819,29 +825,18 @@ void hw_perf_enable(void)
819 continue; 825 continue;
820 826
821 x86_pmu_stop(event); 827 x86_pmu_stop(event);
822
823 hwc->idx = -1;
824 } 828 }
825 829
826 for (i = 0; i < cpuc->n_events; i++) { 830 for (i = 0; i < cpuc->n_events; i++) {
827
828 event = cpuc->event_list[i]; 831 event = cpuc->event_list[i];
829 hwc = &event->hw; 832 hwc = &event->hw;
830 833
831 if (hwc->idx == -1) { 834 if (!match_prev_assignment(hwc, cpuc, i))
832 x86_assign_hw_event(event, cpuc, i); 835 x86_assign_hw_event(event, cpuc, i);
833 x86_perf_event_set_period(event, hwc, hwc->idx); 836 else if (i < n_running)
834 } 837 continue;
835 /*
836 * need to mark as active because x86_pmu_disable()
837 * clear active_mask and events[] yet it preserves
838 * idx
839 */
840 set_bit(hwc->idx, cpuc->active_mask);
841 cpuc->events[hwc->idx] = event;
842 838
843 x86_pmu.enable(hwc, hwc->idx); 839 x86_pmu_start(event);
844 perf_event_update_userpage(event);
845 } 840 }
846 cpuc->n_added = 0; 841 cpuc->n_added = 0;
847 perf_events_lapic_init(); 842 perf_events_lapic_init();
@@ -853,15 +848,16 @@ void hw_perf_enable(void)
853 x86_pmu.enable_all(); 848 x86_pmu.enable_all();
854} 849}
855 850
856static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) 851static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
857{ 852{
858 (void)checking_wrmsrl(hwc->config_base + idx, 853 (void)checking_wrmsrl(hwc->config_base + hwc->idx,
859 hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); 854 hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
860} 855}
861 856
862static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) 857static inline void x86_pmu_disable_event(struct perf_event *event)
863{ 858{
864 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 859 struct hw_perf_event *hwc = &event->hw;
860 (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
865} 861}
866 862
867static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 863static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -871,12 +867,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
871 * To be called with the event disabled in hw: 867 * To be called with the event disabled in hw:
872 */ 868 */
873static int 869static int
874x86_perf_event_set_period(struct perf_event *event, 870x86_perf_event_set_period(struct perf_event *event)
875 struct hw_perf_event *hwc, int idx)
876{ 871{
872 struct hw_perf_event *hwc = &event->hw;
877 s64 left = atomic64_read(&hwc->period_left); 873 s64 left = atomic64_read(&hwc->period_left);
878 s64 period = hwc->sample_period; 874 s64 period = hwc->sample_period;
879 int err, ret = 0; 875 int err, ret = 0, idx = hwc->idx;
880 876
881 if (idx == X86_PMC_IDX_FIXED_BTS) 877 if (idx == X86_PMC_IDX_FIXED_BTS)
882 return 0; 878 return 0;
@@ -922,11 +918,11 @@ x86_perf_event_set_period(struct perf_event *event,
922 return ret; 918 return ret;
923} 919}
924 920
925static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) 921static void x86_pmu_enable_event(struct perf_event *event)
926{ 922{
927 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 923 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
928 if (cpuc->enabled) 924 if (cpuc->enabled)
929 __x86_pmu_enable_event(hwc, idx); 925 __x86_pmu_enable_event(&event->hw);
930} 926}
931 927
932/* 928/*
@@ -962,34 +958,32 @@ static int x86_pmu_enable(struct perf_event *event)
962 memcpy(cpuc->assign, assign, n*sizeof(int)); 958 memcpy(cpuc->assign, assign, n*sizeof(int));
963 959
964 cpuc->n_events = n; 960 cpuc->n_events = n;
965 cpuc->n_added = n - n0; 961 cpuc->n_added += n - n0;
966 962
967 return 0; 963 return 0;
968} 964}
969 965
970static int x86_pmu_start(struct perf_event *event) 966static int x86_pmu_start(struct perf_event *event)
971{ 967{
972 struct hw_perf_event *hwc = &event->hw; 968 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
969 int idx = event->hw.idx;
973 970
974 if (hwc->idx == -1) 971 if (idx == -1)
975 return -EAGAIN; 972 return -EAGAIN;
976 973
977 x86_perf_event_set_period(event, hwc, hwc->idx); 974 x86_perf_event_set_period(event);
978 x86_pmu.enable(hwc, hwc->idx); 975 cpuc->events[idx] = event;
976 __set_bit(idx, cpuc->active_mask);
977 x86_pmu.enable(event);
978 perf_event_update_userpage(event);
979 979
980 return 0; 980 return 0;
981} 981}
982 982
983static void x86_pmu_unthrottle(struct perf_event *event) 983static void x86_pmu_unthrottle(struct perf_event *event)
984{ 984{
985 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 985 int ret = x86_pmu_start(event);
986 struct hw_perf_event *hwc = &event->hw; 986 WARN_ON_ONCE(ret);
987
988 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
989 cpuc->events[hwc->idx] != event))
990 return;
991
992 x86_pmu.enable(hwc, hwc->idx);
993} 987}
994 988
995void perf_event_print_debug(void) 989void perf_event_print_debug(void)
@@ -1049,18 +1043,16 @@ static void x86_pmu_stop(struct perf_event *event)
1049 struct hw_perf_event *hwc = &event->hw; 1043 struct hw_perf_event *hwc = &event->hw;
1050 int idx = hwc->idx; 1044 int idx = hwc->idx;
1051 1045
1052 /* 1046 if (!__test_and_clear_bit(idx, cpuc->active_mask))
1053 * Must be done before we disable, otherwise the nmi handler 1047 return;
1054 * could reenable again: 1048
1055 */ 1049 x86_pmu.disable(event);
1056 clear_bit(idx, cpuc->active_mask);
1057 x86_pmu.disable(hwc, idx);
1058 1050
1059 /* 1051 /*
1060 * Drain the remaining delta count out of a event 1052 * Drain the remaining delta count out of a event
1061 * that we are disabling: 1053 * that we are disabling:
1062 */ 1054 */
1063 x86_perf_event_update(event, hwc, idx); 1055 x86_perf_event_update(event);
1064 1056
1065 cpuc->events[idx] = NULL; 1057 cpuc->events[idx] = NULL;
1066} 1058}
@@ -1108,7 +1100,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1108 event = cpuc->events[idx]; 1100 event = cpuc->events[idx];
1109 hwc = &event->hw; 1101 hwc = &event->hw;
1110 1102
1111 val = x86_perf_event_update(event, hwc, idx); 1103 val = x86_perf_event_update(event);
1112 if (val & (1ULL << (x86_pmu.event_bits - 1))) 1104 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1113 continue; 1105 continue;
1114 1106
@@ -1118,11 +1110,11 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1118 handled = 1; 1110 handled = 1;
1119 data.period = event->hw.last_period; 1111 data.period = event->hw.last_period;
1120 1112
1121 if (!x86_perf_event_set_period(event, hwc, idx)) 1113 if (!x86_perf_event_set_period(event))
1122 continue; 1114 continue;
1123 1115
1124 if (perf_event_overflow(event, 1, &data, regs)) 1116 if (perf_event_overflow(event, 1, &data, regs))
1125 x86_pmu.disable(hwc, idx); 1117 x86_pmu_stop(event);
1126 } 1118 }
1127 1119
1128 if (handled) 1120 if (handled)
@@ -1309,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
1309 memcpy(cpuc->assign, assign, n0*sizeof(int)); 1301 memcpy(cpuc->assign, assign, n0*sizeof(int));
1310 1302
1311 cpuc->n_events = n0; 1303 cpuc->n_events = n0;
1312 cpuc->n_added = n1; 1304 cpuc->n_added += n1;
1313 ctx->nr_active += n1; 1305 ctx->nr_active += n1;
1314 1306
1315 /* 1307 /*
@@ -1337,6 +1329,39 @@ undo:
1337#include "perf_event_p6.c" 1329#include "perf_event_p6.c"
1338#include "perf_event_intel.c" 1330#include "perf_event_intel.c"
1339 1331
1332static int __cpuinit
1333x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1334{
1335 unsigned int cpu = (long)hcpu;
1336
1337 switch (action & ~CPU_TASKS_FROZEN) {
1338 case CPU_UP_PREPARE:
1339 if (x86_pmu.cpu_prepare)
1340 x86_pmu.cpu_prepare(cpu);
1341 break;
1342
1343 case CPU_STARTING:
1344 if (x86_pmu.cpu_starting)
1345 x86_pmu.cpu_starting(cpu);
1346 break;
1347
1348 case CPU_DYING:
1349 if (x86_pmu.cpu_dying)
1350 x86_pmu.cpu_dying(cpu);
1351 break;
1352
1353 case CPU_DEAD:
1354 if (x86_pmu.cpu_dead)
1355 x86_pmu.cpu_dead(cpu);
1356 break;
1357
1358 default:
1359 break;
1360 }
1361
1362 return NOTIFY_OK;
1363}
1364
1340static void __init pmu_check_apic(void) 1365static void __init pmu_check_apic(void)
1341{ 1366{
1342 if (cpu_has_apic) 1367 if (cpu_has_apic)
@@ -1415,11 +1440,13 @@ void __init init_hw_perf_events(void)
1415 pr_info("... max period: %016Lx\n", x86_pmu.max_period); 1440 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1416 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); 1441 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
1417 pr_info("... event mask: %016Lx\n", perf_event_mask); 1442 pr_info("... event mask: %016Lx\n", perf_event_mask);
1443
1444 perf_cpu_notifier(x86_pmu_notifier);
1418} 1445}
1419 1446
1420static inline void x86_pmu_read(struct perf_event *event) 1447static inline void x86_pmu_read(struct perf_event *event)
1421{ 1448{
1422 x86_perf_event_update(event, &event->hw, event->hw.idx); 1449 x86_perf_event_update(event);
1423} 1450}
1424 1451
1425static const struct pmu pmu = { 1452static const struct pmu pmu = {
@@ -1675,28 +1702,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1675 return entry; 1702 return entry;
1676} 1703}
1677 1704
1678void hw_perf_event_setup_online(int cpu) 1705#ifdef CONFIG_EVENT_TRACING
1706void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
1679{ 1707{
1680 init_debug_store_on_cpu(cpu); 1708 regs->ip = ip;
1681 1709 /*
1682 switch (boot_cpu_data.x86_vendor) { 1710 * perf_arch_fetch_caller_regs adds another call, we need to increment
1683 case X86_VENDOR_AMD: 1711 * the skip level
1684 amd_pmu_cpu_online(cpu); 1712 */
1685 break; 1713 regs->bp = rewind_frame_pointer(skip + 1);
1686 default: 1714 regs->cs = __KERNEL_CS;
1687 return; 1715 local_save_flags(regs->flags);
1688 }
1689}
1690
1691void hw_perf_event_setup_offline(int cpu)
1692{
1693 init_debug_store_on_cpu(cpu);
1694
1695 switch (boot_cpu_data.x86_vendor) {
1696 case X86_VENDOR_AMD:
1697 amd_pmu_cpu_offline(cpu);
1698 break;
1699 default:
1700 return;
1701 }
1702} 1716}
1717#endif
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..b87e0b6970cb 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -271,28 +271,6 @@ done:
271 return &emptyconstraint; 271 return &emptyconstraint;
272} 272}
273 273
274static __initconst struct x86_pmu amd_pmu = {
275 .name = "AMD",
276 .handle_irq = x86_pmu_handle_irq,
277 .disable_all = x86_pmu_disable_all,
278 .enable_all = x86_pmu_enable_all,
279 .enable = x86_pmu_enable_event,
280 .disable = x86_pmu_disable_event,
281 .eventsel = MSR_K7_EVNTSEL0,
282 .perfctr = MSR_K7_PERFCTR0,
283 .event_map = amd_pmu_event_map,
284 .raw_event = amd_pmu_raw_event,
285 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
286 .num_events = 4,
287 .event_bits = 48,
288 .event_mask = (1ULL << 48) - 1,
289 .apic = 1,
290 /* use highest bit to detect overflow */
291 .max_period = (1ULL << 47) - 1,
292 .get_event_constraints = amd_get_event_constraints,
293 .put_event_constraints = amd_put_event_constraints
294};
295
296static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) 274static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
297{ 275{
298 struct amd_nb *nb; 276 struct amd_nb *nb;
@@ -309,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
309 * initialize all possible NB constraints 287 * initialize all possible NB constraints
310 */ 288 */
311 for (i = 0; i < x86_pmu.num_events; i++) { 289 for (i = 0; i < x86_pmu.num_events; i++) {
312 set_bit(i, nb->event_constraints[i].idxmsk); 290 __set_bit(i, nb->event_constraints[i].idxmsk);
313 nb->event_constraints[i].weight = 1; 291 nb->event_constraints[i].weight = 1;
314 } 292 }
315 return nb; 293 return nb;
@@ -370,14 +348,41 @@ static void amd_pmu_cpu_offline(int cpu)
370 348
371 raw_spin_lock(&amd_nb_lock); 349 raw_spin_lock(&amd_nb_lock);
372 350
373 if (--cpuhw->amd_nb->refcnt == 0) 351 if (cpuhw->amd_nb) {
374 kfree(cpuhw->amd_nb); 352 if (--cpuhw->amd_nb->refcnt == 0)
353 kfree(cpuhw->amd_nb);
375 354
376 cpuhw->amd_nb = NULL; 355 cpuhw->amd_nb = NULL;
356 }
377 357
378 raw_spin_unlock(&amd_nb_lock); 358 raw_spin_unlock(&amd_nb_lock);
379} 359}
380 360
361static __initconst struct x86_pmu amd_pmu = {
362 .name = "AMD",
363 .handle_irq = x86_pmu_handle_irq,
364 .disable_all = x86_pmu_disable_all,
365 .enable_all = x86_pmu_enable_all,
366 .enable = x86_pmu_enable_event,
367 .disable = x86_pmu_disable_event,
368 .eventsel = MSR_K7_EVNTSEL0,
369 .perfctr = MSR_K7_PERFCTR0,
370 .event_map = amd_pmu_event_map,
371 .raw_event = amd_pmu_raw_event,
372 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
373 .num_events = 4,
374 .event_bits = 48,
375 .event_mask = (1ULL << 48) - 1,
376 .apic = 1,
377 /* use highest bit to detect overflow */
378 .max_period = (1ULL << 47) - 1,
379 .get_event_constraints = amd_get_event_constraints,
380 .put_event_constraints = amd_put_event_constraints,
381
382 .cpu_prepare = amd_pmu_cpu_online,
383 .cpu_dead = amd_pmu_cpu_offline,
384};
385
381static __init int amd_pmu_init(void) 386static __init int amd_pmu_init(void)
382{ 387{
383 /* Performance-monitoring supported from K7 and later: */ 388 /* Performance-monitoring supported from K7 and later: */
@@ -390,11 +395,6 @@ static __init int amd_pmu_init(void)
390 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 395 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
391 sizeof(hw_cache_event_ids)); 396 sizeof(hw_cache_event_ids));
392 397
393 /*
394 * explicitly initialize the boot cpu, other cpus will get
395 * the cpu hotplug callbacks from smp_init()
396 */
397 amd_pmu_cpu_online(smp_processor_id());
398 return 0; 398 return 0;
399} 399}
400 400
@@ -405,12 +405,4 @@ static int amd_pmu_init(void)
405 return 0; 405 return 0;
406} 406}
407 407
408static void amd_pmu_cpu_online(int cpu)
409{
410}
411
412static void amd_pmu_cpu_offline(int cpu)
413{
414}
415
416#endif 408#endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..84bfde64a337 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
548} 548}
549 549
550static inline void 550static inline void
551intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) 551intel_pmu_disable_fixed(struct hw_perf_event *hwc)
552{ 552{
553 int idx = __idx - X86_PMC_IDX_FIXED; 553 int idx = hwc->idx - X86_PMC_IDX_FIXED;
554 u64 ctrl_val, mask; 554 u64 ctrl_val, mask;
555 555
556 mask = 0xfULL << (idx * 4); 556 mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
621} 621}
622 622
623static inline void 623static inline void
624intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) 624intel_pmu_disable_event(struct perf_event *event)
625{ 625{
626 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 626 struct hw_perf_event *hwc = &event->hw;
627
628 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
627 intel_pmu_disable_bts(); 629 intel_pmu_disable_bts();
628 intel_pmu_drain_bts_buffer(); 630 intel_pmu_drain_bts_buffer();
629 return; 631 return;
630 } 632 }
631 633
632 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 634 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
633 intel_pmu_disable_fixed(hwc, idx); 635 intel_pmu_disable_fixed(hwc);
634 return; 636 return;
635 } 637 }
636 638
637 x86_pmu_disable_event(hwc, idx); 639 x86_pmu_disable_event(event);
638} 640}
639 641
640static inline void 642static inline void
641intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) 643intel_pmu_enable_fixed(struct hw_perf_event *hwc)
642{ 644{
643 int idx = __idx - X86_PMC_IDX_FIXED; 645 int idx = hwc->idx - X86_PMC_IDX_FIXED;
644 u64 ctrl_val, bits, mask; 646 u64 ctrl_val, bits, mask;
645 int err; 647 int err;
646 648
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
670 err = checking_wrmsrl(hwc->config_base, ctrl_val); 672 err = checking_wrmsrl(hwc->config_base, ctrl_val);
671} 673}
672 674
673static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) 675static void intel_pmu_enable_event(struct perf_event *event)
674{ 676{
675 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 677 struct hw_perf_event *hwc = &event->hw;
678
679 if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
676 if (!__get_cpu_var(cpu_hw_events).enabled) 680 if (!__get_cpu_var(cpu_hw_events).enabled)
677 return; 681 return;
678 682
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
681 } 685 }
682 686
683 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 687 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
684 intel_pmu_enable_fixed(hwc, idx); 688 intel_pmu_enable_fixed(hwc);
685 return; 689 return;
686 } 690 }
687 691
688 __x86_pmu_enable_event(hwc, idx); 692 __x86_pmu_enable_event(hwc);
689} 693}
690 694
691/* 695/*
@@ -694,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
694 */ 698 */
695static int intel_pmu_save_and_restart(struct perf_event *event) 699static int intel_pmu_save_and_restart(struct perf_event *event)
696{ 700{
697 struct hw_perf_event *hwc = &event->hw; 701 x86_perf_event_update(event);
698 int idx = hwc->idx; 702 return x86_perf_event_set_period(event);
699 int ret;
700
701 x86_perf_event_update(event, hwc, idx);
702 ret = x86_perf_event_set_period(event, hwc, idx);
703
704 return ret;
705} 703}
706 704
707static void intel_pmu_reset(void) 705static void intel_pmu_reset(void)
@@ -745,11 +743,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
745 743
746 cpuc = &__get_cpu_var(cpu_hw_events); 744 cpuc = &__get_cpu_var(cpu_hw_events);
747 745
748 perf_disable(); 746 intel_pmu_disable_all();
749 intel_pmu_drain_bts_buffer(); 747 intel_pmu_drain_bts_buffer();
750 status = intel_pmu_get_status(); 748 status = intel_pmu_get_status();
751 if (!status) { 749 if (!status) {
752 perf_enable(); 750 intel_pmu_enable_all();
753 return 0; 751 return 0;
754 } 752 }
755 753
@@ -759,8 +757,7 @@ again:
759 WARN_ONCE(1, "perfevents: irq loop stuck!\n"); 757 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
760 perf_event_print_debug(); 758 perf_event_print_debug();
761 intel_pmu_reset(); 759 intel_pmu_reset();
762 perf_enable(); 760 goto done;
763 return 1;
764 } 761 }
765 762
766 inc_irq_stat(apic_perf_irqs); 763 inc_irq_stat(apic_perf_irqs);
@@ -768,7 +765,6 @@ again:
768 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 765 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
769 struct perf_event *event = cpuc->events[bit]; 766 struct perf_event *event = cpuc->events[bit];
770 767
771 clear_bit(bit, (unsigned long *) &status);
772 if (!test_bit(bit, cpuc->active_mask)) 768 if (!test_bit(bit, cpuc->active_mask))
773 continue; 769 continue;
774 770
@@ -778,7 +774,7 @@ again:
778 data.period = event->hw.last_period; 774 data.period = event->hw.last_period;
779 775
780 if (perf_event_overflow(event, 1, &data, regs)) 776 if (perf_event_overflow(event, 1, &data, regs))
781 intel_pmu_disable_event(&event->hw, bit); 777 x86_pmu_stop(event);
782 } 778 }
783 779
784 intel_pmu_ack_status(ack); 780 intel_pmu_ack_status(ack);
@@ -790,8 +786,8 @@ again:
790 if (status) 786 if (status)
791 goto again; 787 goto again;
792 788
793 perf_enable(); 789done:
794 790 intel_pmu_enable_all();
795 return 1; 791 return 1;
796} 792}
797 793
@@ -870,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
870 .max_period = (1ULL << 31) - 1, 866 .max_period = (1ULL << 31) - 1,
871 .enable_bts = intel_pmu_enable_bts, 867 .enable_bts = intel_pmu_enable_bts,
872 .disable_bts = intel_pmu_disable_bts, 868 .disable_bts = intel_pmu_disable_bts,
873 .get_event_constraints = intel_get_event_constraints 869 .get_event_constraints = intel_get_event_constraints,
870
871 .cpu_starting = init_debug_store_on_cpu,
872 .cpu_dying = fini_debug_store_on_cpu,
874}; 873};
875 874
876static __init int intel_pmu_init(void) 875static __init int intel_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a4e67b99d91c..a330485d14da 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
77} 77}
78 78
79static inline void 79static inline void
80p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) 80p6_pmu_disable_event(struct perf_event *event)
81{ 81{
82 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 82 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
83 struct hw_perf_event *hwc = &event->hw;
83 u64 val = P6_NOP_EVENT; 84 u64 val = P6_NOP_EVENT;
84 85
85 if (cpuc->enabled) 86 if (cpuc->enabled)
86 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 87 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
87 88
88 (void)checking_wrmsrl(hwc->config_base + idx, val); 89 (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
89} 90}
90 91
91static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) 92static void p6_pmu_enable_event(struct perf_event *event)
92{ 93{
93 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 94 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
95 struct hw_perf_event *hwc = &event->hw;
94 u64 val; 96 u64 val;
95 97
96 val = hwc->config; 98 val = hwc->config;
97 if (cpuc->enabled) 99 if (cpuc->enabled)
98 val |= ARCH_PERFMON_EVENTSEL_ENABLE; 100 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
99 101
100 (void)checking_wrmsrl(hwc->config_base + idx, val); 102 (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
101} 103}
102 104
103static __initconst struct x86_pmu p6_pmu = { 105static __initconst struct x86_pmu p6_pmu = {
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 4fd1420faffa..29e5f7c845b2 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -29,4 +29,19 @@ struct stack_frame {
29 struct stack_frame *next_frame; 29 struct stack_frame *next_frame;
30 unsigned long return_address; 30 unsigned long return_address;
31}; 31};
32
33static inline unsigned long rewind_frame_pointer(int n)
34{
35 struct stack_frame *frame;
36
37 get_bp(frame);
38
39#ifdef CONFIG_FRAME_POINTER
40 while (n--)
41 frame = frame->next_frame;
32#endif 42#endif
43
44 return (unsigned long)frame;
45}
46
47#endif /* DUMPSTACK_H */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index d5e2a2ebb627..272c9f1f05f3 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
208 if (in_irq_stack(stack, irq_stack, irq_stack_end)) { 208 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
209 if (ops->stack(data, "IRQ") < 0) 209 if (ops->stack(data, "IRQ") < 0)
210 break; 210 break;
211 bp = print_context_stack(tinfo, stack, bp, 211 bp = ops->walk_stack(tinfo, stack, bp,
212 ops, data, irq_stack_end, &graph); 212 ops, data, irq_stack_end, &graph);
213 /* 213 /*
214 * We link to the next stack (which would be 214 * We link to the next stack (which would be
@@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
229 /* 229 /*
230 * This handles the process stack: 230 * This handles the process stack:
231 */ 231 */
232 bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph); 232 bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
233 put_cpu(); 233 put_cpu();
234} 234}
235EXPORT_SYMBOL(dump_trace); 235EXPORT_SYMBOL(dump_trace);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 2ff2b6ab5b6c..cbe6f3924a10 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -998,6 +998,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
998 } 998 }
999 999
1000 if (acpi_video_backlight_support()) { 1000 if (acpi_video_backlight_support()) {
1001 struct backlight_properties props;
1001 int result; 1002 int result;
1002 static int count = 0; 1003 static int count = 0;
1003 char *name; 1004 char *name;
@@ -1010,12 +1011,14 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1010 return; 1011 return;
1011 1012
1012 sprintf(name, "acpi_video%d", count++); 1013 sprintf(name, "acpi_video%d", count++);
1013 device->backlight = backlight_device_register(name, 1014 memset(&props, 0, sizeof(struct backlight_properties));
1014 NULL, device, &acpi_backlight_ops); 1015 props.max_brightness = device->brightness->count - 3;
1016 device->backlight = backlight_device_register(name, NULL, device,
1017 &acpi_backlight_ops,
1018 &props);
1015 kfree(name); 1019 kfree(name);
1016 if (IS_ERR(device->backlight)) 1020 if (IS_ERR(device->backlight))
1017 return; 1021 return;
1018 device->backlight->props.max_brightness = device->brightness->count-3;
1019 1022
1020 result = sysfs_create_link(&device->backlight->dev.kobj, 1023 result = sysfs_create_link(&device->backlight->dev.kobj,
1021 &device->dev->dev.kobj, "device"); 1024 &device->dev->dev.kobj, "device");
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6bd930b93bcc..fdc9bcbe55a2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -641,6 +641,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
641 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ 641 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ 642 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ 643 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
644 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
645 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
644 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ 659 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
645 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ 660 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
646 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ 661 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
@@ -2263,7 +2278,7 @@ static void ahci_port_intr(struct ata_port *ap)
2263 struct ahci_port_priv *pp = ap->private_data; 2278 struct ahci_port_priv *pp = ap->private_data;
2264 struct ahci_host_priv *hpriv = ap->host->private_data; 2279 struct ahci_host_priv *hpriv = ap->host->private_data;
2265 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); 2280 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2266 u32 status, qc_active; 2281 u32 status, qc_active = 0;
2267 int rc; 2282 int rc;
2268 2283
2269 status = readl(port_mmio + PORT_IRQ_STAT); 2284 status = readl(port_mmio + PORT_IRQ_STAT);
@@ -2321,11 +2336,22 @@ static void ahci_port_intr(struct ata_port *ap)
2321 } 2336 }
2322 } 2337 }
2323 2338
2324 /* pp->active_link is valid iff any command is in flight */ 2339 /* pp->active_link is not reliable once FBS is enabled, both
2325 if (ap->qc_active && pp->active_link->sactive) 2340 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
2326 qc_active = readl(port_mmio + PORT_SCR_ACT); 2341 * NCQ and non-NCQ commands may be in flight at the same time.
2327 else 2342 */
2328 qc_active = readl(port_mmio + PORT_CMD_ISSUE); 2343 if (pp->fbs_enabled) {
2344 if (ap->qc_active) {
2345 qc_active = readl(port_mmio + PORT_SCR_ACT);
2346 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
2347 }
2348 } else {
2349 /* pp->active_link is valid iff any command is in flight */
2350 if (ap->qc_active && pp->active_link->sactive)
2351 qc_active = readl(port_mmio + PORT_SCR_ACT);
2352 else
2353 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2354 }
2329 2355
2330 rc = ata_qc_complete_multiple(ap, qc_active); 2356 rc = ata_qc_complete_multiple(ap, qc_active);
2331 2357
@@ -3022,6 +3048,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3022 * On HP dv[4-6] and HDX18 with earlier BIOSen, link 3048 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
3023 * to the harddisk doesn't become online after 3049 * to the harddisk doesn't become online after
3024 * resuming from STR. Warn and fail suspend. 3050 * resuming from STR. Warn and fail suspend.
3051 *
3052 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
3053 *
3054 * Use dates instead of versions to match as HP is
3055 * apparently recycling both product and version
3056 * strings.
3057 *
3058 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
3025 */ 3059 */
3026 { 3060 {
3027 .ident = "dv4", 3061 .ident = "dv4",
@@ -3030,7 +3064,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3030 DMI_MATCH(DMI_PRODUCT_NAME, 3064 DMI_MATCH(DMI_PRODUCT_NAME,
3031 "HP Pavilion dv4 Notebook PC"), 3065 "HP Pavilion dv4 Notebook PC"),
3032 }, 3066 },
3033 .driver_data = "F.30", /* cutoff BIOS version */ 3067 .driver_data = "20090105", /* F.30 */
3034 }, 3068 },
3035 { 3069 {
3036 .ident = "dv5", 3070 .ident = "dv5",
@@ -3039,7 +3073,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3039 DMI_MATCH(DMI_PRODUCT_NAME, 3073 DMI_MATCH(DMI_PRODUCT_NAME,
3040 "HP Pavilion dv5 Notebook PC"), 3074 "HP Pavilion dv5 Notebook PC"),
3041 }, 3075 },
3042 .driver_data = "F.16", /* cutoff BIOS version */ 3076 .driver_data = "20090506", /* F.16 */
3043 }, 3077 },
3044 { 3078 {
3045 .ident = "dv6", 3079 .ident = "dv6",
@@ -3048,7 +3082,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3048 DMI_MATCH(DMI_PRODUCT_NAME, 3082 DMI_MATCH(DMI_PRODUCT_NAME,
3049 "HP Pavilion dv6 Notebook PC"), 3083 "HP Pavilion dv6 Notebook PC"),
3050 }, 3084 },
3051 .driver_data = "F.21", /* cutoff BIOS version */ 3085 .driver_data = "20090423", /* F.21 */
3052 }, 3086 },
3053 { 3087 {
3054 .ident = "HDX18", 3088 .ident = "HDX18",
@@ -3057,7 +3091,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3057 DMI_MATCH(DMI_PRODUCT_NAME, 3091 DMI_MATCH(DMI_PRODUCT_NAME,
3058 "HP HDX18 Notebook PC"), 3092 "HP HDX18 Notebook PC"),
3059 }, 3093 },
3060 .driver_data = "F.23", /* cutoff BIOS version */ 3094 .driver_data = "20090430", /* F.23 */
3061 }, 3095 },
3062 /* 3096 /*
3063 * Acer eMachines G725 has the same problem. BIOS 3097 * Acer eMachines G725 has the same problem. BIOS
@@ -3065,6 +3099,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3065 * work. Inbetween, there are V1.06, V2.06 and V3.03 3099 * work. Inbetween, there are V1.06, V2.06 and V3.03
3066 * that we don't have much idea about. For now, 3100 * that we don't have much idea about. For now,
3067 * blacklist anything older than V3.04. 3101 * blacklist anything older than V3.04.
3102 *
3103 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
3068 */ 3104 */
3069 { 3105 {
3070 .ident = "G725", 3106 .ident = "G725",
@@ -3072,19 +3108,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
3072 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), 3108 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3073 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), 3109 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3074 }, 3110 },
3075 .driver_data = "V3.04", /* cutoff BIOS version */ 3111 .driver_data = "20091216", /* V3.04 */
3076 }, 3112 },
3077 { } /* terminate list */ 3113 { } /* terminate list */
3078 }; 3114 };
3079 const struct dmi_system_id *dmi = dmi_first_match(sysids); 3115 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3080 const char *ver; 3116 int year, month, date;
3117 char buf[9];
3081 3118
3082 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) 3119 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3083 return false; 3120 return false;
3084 3121
3085 ver = dmi_get_system_info(DMI_BIOS_VERSION); 3122 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3123 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3086 3124
3087 return !ver || strcmp(ver, dmi->driver_data) < 0; 3125 return strcmp(buf, dmi->driver_data) < 0;
3088} 3126}
3089 3127
3090static bool ahci_broken_online(struct pci_dev *pdev) 3128static bool ahci_broken_online(struct pci_dev *pdev)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0147f476b8a9..9c6a0d6408e7 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -219,6 +219,8 @@ static void class_create_release(struct class *cls)
219 * This is used to create a struct class pointer that can then be used 219 * This is used to create a struct class pointer that can then be used
220 * in calls to device_create(). 220 * in calls to device_create().
221 * 221 *
222 * Returns &struct class pointer on success, or ERR_PTR() on error.
223 *
222 * Note, the pointer created here is to be destroyed when finished by 224 * Note, the pointer created here is to be destroyed when finished by
223 * making a call to class_destroy(). 225 * making a call to class_destroy().
224 */ 226 */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ef55df34ddd0..b56a0ba31d4a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1345,6 +1345,8 @@ static void root_device_release(struct device *dev)
1345 * 'module' symlink which points to the @owner directory 1345 * 'module' symlink which points to the @owner directory
1346 * in sysfs. 1346 * in sysfs.
1347 * 1347 *
1348 * Returns &struct device pointer on success, or ERR_PTR() on error.
1349 *
1348 * Note: You probably want to use root_device_register(). 1350 * Note: You probably want to use root_device_register().
1349 */ 1351 */
1350struct device *__root_device_register(const char *name, struct module *owner) 1352struct device *__root_device_register(const char *name, struct module *owner)
@@ -1432,6 +1434,8 @@ static void device_create_release(struct device *dev)
1432 * Any further sysfs files that might be required can be created using this 1434 * Any further sysfs files that might be required can be created using this
1433 * pointer. 1435 * pointer.
1434 * 1436 *
1437 * Returns &struct device pointer on success, or ERR_PTR() on error.
1438 *
1435 * Note: the struct class passed to this function must have previously 1439 * Note: the struct class passed to this function must have previously
1436 * been created with a call to class_create(). 1440 * been created with a call to class_create().
1437 */ 1441 */
@@ -1492,6 +1496,8 @@ EXPORT_SYMBOL_GPL(device_create_vargs);
1492 * Any further sysfs files that might be required can be created using this 1496 * Any further sysfs files that might be required can be created using this
1493 * pointer. 1497 * pointer.
1494 * 1498 *
1499 * Returns &struct device pointer on success, or ERR_PTR() on error.
1500 *
1495 * Note: the struct class passed to this function must have previously 1501 * Note: the struct class passed to this function must have previously
1496 * been created with a call to class_create(). 1502 * been created with a call to class_create().
1497 */ 1503 */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7036e8e96ab8..b5242e1e8bc4 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -79,24 +79,24 @@ void unregister_cpu(struct cpu *cpu)
79} 79}
80 80
81#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 81#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
82static ssize_t cpu_probe_store(struct sys_device *dev, 82static ssize_t cpu_probe_store(struct sysdev_class *class,
83 struct sysdev_attribute *attr, 83 struct sysdev_class_attribute *attr,
84 const char *buf, 84 const char *buf,
85 size_t count) 85 size_t count)
86{ 86{
87 return arch_cpu_probe(buf, count); 87 return arch_cpu_probe(buf, count);
88} 88}
89 89
90static ssize_t cpu_release_store(struct sys_device *dev, 90static ssize_t cpu_release_store(struct sysdev_class *class,
91 struct sysdev_attribute *attr, 91 struct sysdev_class_attribute *attr,
92 const char *buf, 92 const char *buf,
93 size_t count) 93 size_t count)
94{ 94{
95 return arch_cpu_release(buf, count); 95 return arch_cpu_release(buf, count);
96} 96}
97 97
98static SYSDEV_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); 98static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
99static SYSDEV_ATTR(release, S_IWUSR, NULL, cpu_release_store); 99static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
100#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ 100#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
101 101
102#else /* ... !CONFIG_HOTPLUG_CPU */ 102#else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d0dc26ad5387..18518ba13c81 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -78,6 +78,7 @@ firmware_timeout_show(struct class *class,
78/** 78/**
79 * firmware_timeout_store - set number of seconds to wait for firmware 79 * firmware_timeout_store - set number of seconds to wait for firmware
80 * @class: device class pointer 80 * @class: device class pointer
81 * @attr: device attribute pointer
81 * @buf: buffer to scan for timeout value 82 * @buf: buffer to scan for timeout value
82 * @count: number of bytes in @buf 83 * @count: number of bytes in @buf
83 * 84 *
@@ -442,6 +443,7 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p,
442 fw_priv = dev_get_drvdata(f_dev); 443 fw_priv = dev_get_drvdata(f_dev);
443 444
444 fw_priv->fw = fw; 445 fw_priv->fw = fw;
446 sysfs_bin_attr_init(&fw_priv->attr_data);
445 retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data); 447 retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
446 if (retval) { 448 if (retval) {
447 dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__); 449 dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2f8691511190..db0848e54cc6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -429,12 +429,16 @@ static inline int memory_fail_init(void)
429 * differentiation between which *physical* devices each 429 * differentiation between which *physical* devices each
430 * section belongs to... 430 * section belongs to...
431 */ 431 */
432int __weak arch_get_memory_phys_device(unsigned long start_pfn)
433{
434 return 0;
435}
432 436
433static int add_memory_block(int nid, struct mem_section *section, 437static int add_memory_block(int nid, struct mem_section *section,
434 unsigned long state, int phys_device, 438 unsigned long state, enum mem_add_context context)
435 enum mem_add_context context)
436{ 439{
437 struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); 440 struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
441 unsigned long start_pfn;
438 int ret = 0; 442 int ret = 0;
439 443
440 if (!mem) 444 if (!mem)
@@ -443,7 +447,8 @@ static int add_memory_block(int nid, struct mem_section *section,
443 mem->phys_index = __section_nr(section); 447 mem->phys_index = __section_nr(section);
444 mem->state = state; 448 mem->state = state;
445 mutex_init(&mem->state_mutex); 449 mutex_init(&mem->state_mutex);
446 mem->phys_device = phys_device; 450 start_pfn = section_nr_to_pfn(mem->phys_index);
451 mem->phys_device = arch_get_memory_phys_device(start_pfn);
447 452
448 ret = register_memory(mem, section); 453 ret = register_memory(mem, section);
449 if (!ret) 454 if (!ret)
@@ -515,7 +520,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
515 */ 520 */
516int register_new_memory(int nid, struct mem_section *section) 521int register_new_memory(int nid, struct mem_section *section)
517{ 522{
518 return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); 523 return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG);
519} 524}
520 525
521int unregister_memory_section(struct mem_section *section) 526int unregister_memory_section(struct mem_section *section)
@@ -548,7 +553,7 @@ int __init memory_dev_init(void)
548 if (!present_section_nr(i)) 553 if (!present_section_nr(i))
549 continue; 554 continue;
550 err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 555 err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE,
551 0, BOOT); 556 BOOT);
552 if (!ret) 557 if (!ret)
553 ret = err; 558 ret = err;
554 } 559 }
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ad43185ec15a..93b3ac65c2d4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -165,8 +165,11 @@ static ssize_t node_read_distance(struct sys_device * dev,
165 int len = 0; 165 int len = 0;
166 int i; 166 int i;
167 167
168 /* buf currently PAGE_SIZE, need ~4 chars per node */ 168 /*
169 BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2); 169 * buf is currently PAGE_SIZE in length and each node needs 4 chars
170 * at the most (distance + space or newline).
171 */
172 BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
170 173
171 for_each_online_node(i) 174 for_each_online_node(i)
172 len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); 175 len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1ba9d617d241..4b4b565c835f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -362,6 +362,8 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
362 * enumeration tasks, they don't fully conform to the Linux driver model. 362 * enumeration tasks, they don't fully conform to the Linux driver model.
363 * In particular, when such drivers are built as modules, they can't be 363 * In particular, when such drivers are built as modules, they can't be
364 * "hotplugged". 364 * "hotplugged".
365 *
366 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
365 */ 367 */
366struct platform_device *platform_device_register_simple(const char *name, 368struct platform_device *platform_device_register_simple(const char *name,
367 int id, 369 int id,
@@ -408,6 +410,8 @@ EXPORT_SYMBOL_GPL(platform_device_register_simple);
408 * allocated for the device allows drivers using such devices to be 410 * allocated for the device allows drivers using such devices to be
409 * unloaded without waiting for the last reference to the device to be 411 * unloaded without waiting for the last reference to the device to be
410 * dropped. 412 * dropped.
413 *
414 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
411 */ 415 */
412struct platform_device *platform_device_register_data( 416struct platform_device *platform_device_register_data(
413 struct device *parent, 417 struct device *parent,
@@ -559,6 +563,8 @@ EXPORT_SYMBOL_GPL(platform_driver_probe);
559 * 563 *
560 * Use this in legacy-style modules that probe hardware directly and 564 * Use this in legacy-style modules that probe hardware directly and
561 * register a single platform device and corresponding platform driver. 565 * register a single platform device and corresponding platform driver.
566 *
567 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
562 */ 568 */
563struct platform_device * __init_or_module platform_create_bundle( 569struct platform_device * __init_or_module platform_create_bundle(
564 struct platform_driver *driver, 570 struct platform_driver *driver,
@@ -1052,9 +1058,11 @@ static __initdata LIST_HEAD(early_platform_driver_list);
1052static __initdata LIST_HEAD(early_platform_device_list); 1058static __initdata LIST_HEAD(early_platform_device_list);
1053 1059
1054/** 1060/**
1055 * early_platform_driver_register 1061 * early_platform_driver_register - register early platform driver
1056 * @epdrv: early_platform driver structure 1062 * @epdrv: early_platform driver structure
1057 * @buf: string passed from early_param() 1063 * @buf: string passed from early_param()
1064 *
1065 * Helper function for early_platform_init() / early_platform_init_buffer()
1058 */ 1066 */
1059int __init early_platform_driver_register(struct early_platform_driver *epdrv, 1067int __init early_platform_driver_register(struct early_platform_driver *epdrv,
1060 char *buf) 1068 char *buf)
@@ -1106,9 +1114,12 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv,
1106} 1114}
1107 1115
1108/** 1116/**
1109 * early_platform_add_devices - add a numbers of early platform devices 1117 * early_platform_add_devices - adds a number of early platform devices
1110 * @devs: array of early platform devices to add 1118 * @devs: array of early platform devices to add
1111 * @num: number of early platform devices in array 1119 * @num: number of early platform devices in array
1120 *
1121 * Used by early architecture code to register early platform devices and
1122 * their platform data.
1112 */ 1123 */
1113void __init early_platform_add_devices(struct platform_device **devs, int num) 1124void __init early_platform_add_devices(struct platform_device **devs, int num)
1114{ 1125{
@@ -1128,8 +1139,12 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
1128} 1139}
1129 1140
1130/** 1141/**
1131 * early_platform_driver_register_all 1142 * early_platform_driver_register_all - register early platform drivers
1132 * @class_str: string to identify early platform driver class 1143 * @class_str: string to identify early platform driver class
1144 *
1145 * Used by architecture code to register all early platform drivers
1146 * for a certain class. If omitted then only early platform drivers
1147 * with matching kernel command line class parameters will be registered.
1133 */ 1148 */
1134void __init early_platform_driver_register_all(char *class_str) 1149void __init early_platform_driver_register_all(char *class_str)
1135{ 1150{
@@ -1151,7 +1166,7 @@ void __init early_platform_driver_register_all(char *class_str)
1151} 1166}
1152 1167
1153/** 1168/**
1154 * early_platform_match 1169 * early_platform_match - find early platform device matching driver
1155 * @epdrv: early platform driver structure 1170 * @epdrv: early platform driver structure
1156 * @id: id to match against 1171 * @id: id to match against
1157 */ 1172 */
@@ -1169,7 +1184,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id)
1169} 1184}
1170 1185
1171/** 1186/**
1172 * early_platform_left 1187 * early_platform_left - check if early platform driver has matching devices
1173 * @epdrv: early platform driver structure 1188 * @epdrv: early platform driver structure
1174 * @id: return true if id or above exists 1189 * @id: return true if id or above exists
1175 */ 1190 */
@@ -1187,7 +1202,7 @@ static __init int early_platform_left(struct early_platform_driver *epdrv,
1187} 1202}
1188 1203
1189/** 1204/**
1190 * early_platform_driver_probe_id 1205 * early_platform_driver_probe_id - probe drivers matching class_str and id
1191 * @class_str: string to identify early platform driver class 1206 * @class_str: string to identify early platform driver class
1192 * @id: id to match against 1207 * @id: id to match against
1193 * @nr_probe: number of platform devices to successfully probe before exiting 1208 * @nr_probe: number of platform devices to successfully probe before exiting
@@ -1257,10 +1272,14 @@ static int __init early_platform_driver_probe_id(char *class_str,
1257} 1272}
1258 1273
1259/** 1274/**
1260 * early_platform_driver_probe 1275 * early_platform_driver_probe - probe a class of registered drivers
1261 * @class_str: string to identify early platform driver class 1276 * @class_str: string to identify early platform driver class
1262 * @nr_probe: number of platform devices to successfully probe before exiting 1277 * @nr_probe: number of platform devices to successfully probe before exiting
1263 * @user_only: only probe user specified early platform devices 1278 * @user_only: only probe user specified early platform devices
1279 *
1280 * Used by architecture code to probe registered early platform drivers
1281 * within a certain class. For probe to happen a registered early platform
1282 * device matching a registered early platform driver is needed.
1264 */ 1283 */
1265int __init early_platform_driver_probe(char *class_str, 1284int __init early_platform_driver_probe(char *class_str,
1266 int nr_probe, 1285 int nr_probe,
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e481c5938bad..9c5eea3ea4de 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -215,9 +215,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp)
215 else 215 else
216 v &= ~0xffff; 216 v &= ~0xffff;
217 217
218 for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ; 218 for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
219 irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
220
221 if (irq >= nr_irqs) { 219 if (irq >= nr_irqs) {
222 irq = HPET_MAX_IRQ; 220 irq = HPET_MAX_IRQ;
223 break; 221 break;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 465185fc0f52..ba55bba151b9 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -312,6 +312,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
312 spin_lock_irqsave(&hp->lock, flags); 312 spin_lock_irqsave(&hp->lock, flags);
313 /* Check and then increment for fast path open. */ 313 /* Check and then increment for fast path open. */
314 if (hp->count++ > 0) { 314 if (hp->count++ > 0) {
315 tty_kref_get(tty);
315 spin_unlock_irqrestore(&hp->lock, flags); 316 spin_unlock_irqrestore(&hp->lock, flags);
316 hvc_kick(); 317 hvc_kick();
317 return 0; 318 return 0;
@@ -319,7 +320,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
319 320
320 tty->driver_data = hp; 321 tty->driver_data = hp;
321 322
322 hp->tty = tty; 323 hp->tty = tty_kref_get(tty);
323 324
324 spin_unlock_irqrestore(&hp->lock, flags); 325 spin_unlock_irqrestore(&hp->lock, flags);
325 326
@@ -336,6 +337,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
336 spin_lock_irqsave(&hp->lock, flags); 337 spin_lock_irqsave(&hp->lock, flags);
337 hp->tty = NULL; 338 hp->tty = NULL;
338 spin_unlock_irqrestore(&hp->lock, flags); 339 spin_unlock_irqrestore(&hp->lock, flags);
340 tty_kref_put(tty);
339 tty->driver_data = NULL; 341 tty->driver_data = NULL;
340 kref_put(&hp->kref, destroy_hvc_struct); 342 kref_put(&hp->kref, destroy_hvc_struct);
341 printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); 343 printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
@@ -363,13 +365,18 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
363 return; 365 return;
364 366
365 hp = tty->driver_data; 367 hp = tty->driver_data;
368
366 spin_lock_irqsave(&hp->lock, flags); 369 spin_lock_irqsave(&hp->lock, flags);
370 tty_kref_get(tty);
367 371
368 if (--hp->count == 0) { 372 if (--hp->count == 0) {
369 /* We are done with the tty pointer now. */ 373 /* We are done with the tty pointer now. */
370 hp->tty = NULL; 374 hp->tty = NULL;
371 spin_unlock_irqrestore(&hp->lock, flags); 375 spin_unlock_irqrestore(&hp->lock, flags);
372 376
377 /* Put the ref obtained in hvc_open() */
378 tty_kref_put(tty);
379
373 if (hp->ops->notifier_del) 380 if (hp->ops->notifier_del)
374 hp->ops->notifier_del(hp, hp->data); 381 hp->ops->notifier_del(hp, hp->data);
375 382
@@ -389,6 +396,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
389 spin_unlock_irqrestore(&hp->lock, flags); 396 spin_unlock_irqrestore(&hp->lock, flags);
390 } 397 }
391 398
399 tty_kref_put(tty);
392 kref_put(&hp->kref, destroy_hvc_struct); 400 kref_put(&hp->kref, destroy_hvc_struct);
393} 401}
394 402
@@ -424,10 +432,11 @@ static void hvc_hangup(struct tty_struct *tty)
424 spin_unlock_irqrestore(&hp->lock, flags); 432 spin_unlock_irqrestore(&hp->lock, flags);
425 433
426 if (hp->ops->notifier_hangup) 434 if (hp->ops->notifier_hangup)
427 hp->ops->notifier_hangup(hp, hp->data); 435 hp->ops->notifier_hangup(hp, hp->data);
428 436
429 while(temp_open_count) { 437 while(temp_open_count) {
430 --temp_open_count; 438 --temp_open_count;
439 tty_kref_put(tty);
431 kref_put(&hp->kref, destroy_hvc_struct); 440 kref_put(&hp->kref, destroy_hvc_struct);
432 } 441 }
433} 442}
@@ -592,7 +601,7 @@ int hvc_poll(struct hvc_struct *hp)
592 } 601 }
593 602
594 /* No tty attached, just skip */ 603 /* No tty attached, just skip */
595 tty = hp->tty; 604 tty = tty_kref_get(hp->tty);
596 if (tty == NULL) 605 if (tty == NULL)
597 goto bail; 606 goto bail;
598 607
@@ -672,6 +681,8 @@ int hvc_poll(struct hvc_struct *hp)
672 681
673 tty_flip_buffer_push(tty); 682 tty_flip_buffer_push(tty);
674 } 683 }
684 if (tty)
685 tty_kref_put(tty);
675 686
676 return poll_mask; 687 return poll_mask;
677} 688}
@@ -807,7 +818,7 @@ int hvc_remove(struct hvc_struct *hp)
807 struct tty_struct *tty; 818 struct tty_struct *tty;
808 819
809 spin_lock_irqsave(&hp->lock, flags); 820 spin_lock_irqsave(&hp->lock, flags);
810 tty = hp->tty; 821 tty = tty_kref_get(hp->tty);
811 822
812 if (hp->index < MAX_NR_HVC_CONSOLES) 823 if (hp->index < MAX_NR_HVC_CONSOLES)
813 vtermnos[hp->index] = -1; 824 vtermnos[hp->index] = -1;
@@ -819,18 +830,18 @@ int hvc_remove(struct hvc_struct *hp)
819 /* 830 /*
820 * We 'put' the instance that was grabbed when the kref instance 831 * We 'put' the instance that was grabbed when the kref instance
821 * was initialized using kref_init(). Let the last holder of this 832 * was initialized using kref_init(). Let the last holder of this
822 * kref cause it to be removed, which will probably be the tty_hangup 833 * kref cause it to be removed, which will probably be the tty_vhangup
823 * below. 834 * below.
824 */ 835 */
825 kref_put(&hp->kref, destroy_hvc_struct); 836 kref_put(&hp->kref, destroy_hvc_struct);
826 837
827 /* 838 /*
828 * This function call will auto chain call hvc_hangup. The tty should 839 * This function call will auto chain call hvc_hangup.
829 * always be valid at this time unless a simultaneous tty close already
830 * cleaned up the hvc_struct.
831 */ 840 */
832 if (tty) 841 if (tty) {
833 tty_hangup(tty); 842 tty_vhangup(tty);
843 tty_kref_put(tty);
844 }
834 return 0; 845 return 0;
835} 846}
836EXPORT_SYMBOL_GPL(hvc_remove); 847EXPORT_SYMBOL_GPL(hvc_remove);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec5e3f8df648..c6ad4234378d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2272,42 +2272,52 @@ static int create_files(struct bmc_device *bmc)
2272 bmc->device_id_attr.attr.name = "device_id"; 2272 bmc->device_id_attr.attr.name = "device_id";
2273 bmc->device_id_attr.attr.mode = S_IRUGO; 2273 bmc->device_id_attr.attr.mode = S_IRUGO;
2274 bmc->device_id_attr.show = device_id_show; 2274 bmc->device_id_attr.show = device_id_show;
2275 sysfs_attr_init(&bmc->device_id_attr.attr);
2275 2276
2276 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; 2277 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2277 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; 2278 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2278 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; 2279 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2280 sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
2279 2281
2280 bmc->revision_attr.attr.name = "revision"; 2282 bmc->revision_attr.attr.name = "revision";
2281 bmc->revision_attr.attr.mode = S_IRUGO; 2283 bmc->revision_attr.attr.mode = S_IRUGO;
2282 bmc->revision_attr.show = revision_show; 2284 bmc->revision_attr.show = revision_show;
2285 sysfs_attr_init(&bmc->revision_attr.attr);
2283 2286
2284 bmc->firmware_rev_attr.attr.name = "firmware_revision"; 2287 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2285 bmc->firmware_rev_attr.attr.mode = S_IRUGO; 2288 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2286 bmc->firmware_rev_attr.show = firmware_rev_show; 2289 bmc->firmware_rev_attr.show = firmware_rev_show;
2290 sysfs_attr_init(&bmc->firmware_rev_attr.attr);
2287 2291
2288 bmc->version_attr.attr.name = "ipmi_version"; 2292 bmc->version_attr.attr.name = "ipmi_version";
2289 bmc->version_attr.attr.mode = S_IRUGO; 2293 bmc->version_attr.attr.mode = S_IRUGO;
2290 bmc->version_attr.show = ipmi_version_show; 2294 bmc->version_attr.show = ipmi_version_show;
2295 sysfs_attr_init(&bmc->version_attr.attr);
2291 2296
2292 bmc->add_dev_support_attr.attr.name = "additional_device_support"; 2297 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2293 bmc->add_dev_support_attr.attr.mode = S_IRUGO; 2298 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2294 bmc->add_dev_support_attr.show = add_dev_support_show; 2299 bmc->add_dev_support_attr.show = add_dev_support_show;
2300 sysfs_attr_init(&bmc->add_dev_support_attr.attr);
2295 2301
2296 bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; 2302 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2297 bmc->manufacturer_id_attr.attr.mode = S_IRUGO; 2303 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2298 bmc->manufacturer_id_attr.show = manufacturer_id_show; 2304 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2305 sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
2299 2306
2300 bmc->product_id_attr.attr.name = "product_id"; 2307 bmc->product_id_attr.attr.name = "product_id";
2301 bmc->product_id_attr.attr.mode = S_IRUGO; 2308 bmc->product_id_attr.attr.mode = S_IRUGO;
2302 bmc->product_id_attr.show = product_id_show; 2309 bmc->product_id_attr.show = product_id_show;
2310 sysfs_attr_init(&bmc->product_id_attr.attr);
2303 2311
2304 bmc->guid_attr.attr.name = "guid"; 2312 bmc->guid_attr.attr.name = "guid";
2305 bmc->guid_attr.attr.mode = S_IRUGO; 2313 bmc->guid_attr.attr.mode = S_IRUGO;
2306 bmc->guid_attr.show = guid_show; 2314 bmc->guid_attr.show = guid_show;
2315 sysfs_attr_init(&bmc->guid_attr.attr);
2307 2316
2308 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; 2317 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2309 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; 2318 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2310 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; 2319 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2320 sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
2311 2321
2312 err = device_create_file(&bmc->dev->dev, 2322 err = device_create_file(&bmc->dev->dev,
2313 &bmc->device_id_attr); 2323 &bmc->device_id_attr);
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index af8d97715728..7ee52164d474 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -248,7 +248,7 @@ int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
248{ 248{
249 int copied = 0; 249 int copied = 0;
250 do { 250 do {
251 int goal = min(size - copied, TTY_BUFFER_PAGE); 251 int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
252 int space = tty_buffer_request_room(tty, goal); 252 int space = tty_buffer_request_room(tty, goal);
253 struct tty_buffer *tb = tty->buf.tail; 253 struct tty_buffer *tb = tty->buf.tail;
254 /* If there is no space then tb may be NULL */ 254 /* If there is no space then tb may be NULL */
@@ -285,7 +285,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
285{ 285{
286 int copied = 0; 286 int copied = 0;
287 do { 287 do {
288 int goal = min(size - copied, TTY_BUFFER_PAGE); 288 int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
289 int space = tty_buffer_request_room(tty, goal); 289 int space = tty_buffer_request_room(tty, goal);
290 struct tty_buffer *tb = tty->buf.tail; 290 struct tty_buffer *tb = tty->buf.tail;
291 /* If there is no space then tb may be NULL */ 291 /* If there is no space then tb may be NULL */
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index be492dd66437..a3bd1d0b66cf 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(tty_port_tty_set);
119static void tty_port_shutdown(struct tty_port *port) 119static void tty_port_shutdown(struct tty_port *port)
120{ 120{
121 mutex_lock(&port->mutex); 121 mutex_lock(&port->mutex);
122 if (port->ops->shutdown && 122 if (port->ops->shutdown && !port->console &&
123 test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) 123 test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags))
124 port->ops->shutdown(port); 124 port->ops->shutdown(port);
125 mutex_unlock(&port->mutex); 125 mutex_unlock(&port->mutex);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index f404ccfc9c20..44288ce0cb45 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -681,6 +681,10 @@ static void resize_console(struct port *port)
681 struct virtio_device *vdev; 681 struct virtio_device *vdev;
682 struct winsize ws; 682 struct winsize ws;
683 683
684 /* The port could have been hot-unplugged */
685 if (!port)
686 return;
687
684 vdev = port->portdev->vdev; 688 vdev = port->portdev->vdev;
685 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { 689 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
686 vdev->config->get(vdev, 690 vdev->config->get(vdev,
@@ -947,11 +951,18 @@ static void handle_control_message(struct ports_device *portdev,
947 */ 951 */
948 err = sysfs_create_group(&port->dev->kobj, 952 err = sysfs_create_group(&port->dev->kobj,
949 &port_attribute_group); 953 &port_attribute_group);
950 if (err) 954 if (err) {
951 dev_err(port->dev, 955 dev_err(port->dev,
952 "Error %d creating sysfs device attributes\n", 956 "Error %d creating sysfs device attributes\n",
953 err); 957 err);
954 958 } else {
959 /*
960 * Generate a udev event so that appropriate
961 * symlinks can be created based on udev
962 * rules.
963 */
964 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
965 }
955 break; 966 break;
956 case VIRTIO_CONSOLE_PORT_REMOVE: 967 case VIRTIO_CONSOLE_PORT_REMOVE:
957 /* 968 /*
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 87778dcf8727..6aa10284104a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
888 ret = -EFAULT; 888 ret = -EFAULT;
889 goto out; 889 goto out;
890 } 890 }
891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) { 891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
892 ret = -EINVAL; 892 ret = -EINVAL;
893 goto out; 893 goto out;
894 } 894 }
@@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc)
1622 * telling it that it has acquired. Also check if it has died and 1622 * telling it that it has acquired. Also check if it has died and
1623 * clean up (similar to logic employed in change_console()) 1623 * clean up (similar to logic employed in change_console())
1624 */ 1624 */
1625 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { 1625 if (vc->vt_mode.mode == VT_PROCESS) {
1626 /* 1626 /*
1627 * Send the signal as privileged - kill_pid() will 1627 * Send the signal as privileged - kill_pid() will
1628 * tell us if the process has gone or something else 1628 * tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc)
1682 * vt to auto control. 1682 * vt to auto control.
1683 */ 1683 */
1684 vc = vc_cons[fg_console].d; 1684 vc = vc_cons[fg_console].d;
1685 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { 1685 if (vc->vt_mode.mode == VT_PROCESS) {
1686 /* 1686 /*
1687 * Send the signal as privileged - kill_pid() will 1687 * Send the signal as privileged - kill_pid() will
1688 * tell us if the process has gone or something else 1688 * tell us if the process has gone or something else
@@ -1693,28 +1693,27 @@ void change_console(struct vc_data *new_vc)
1693 */ 1693 */
1694 vc->vt_newvt = new_vc->vc_num; 1694 vc->vt_newvt = new_vc->vc_num;
1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { 1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
1696 if(vc->vt_mode.mode == VT_PROCESS)
1697 /*
1698 * It worked. Mark the vt to switch to and
1699 * return. The process needs to send us a
1700 * VT_RELDISP ioctl to complete the switch.
1701 */
1702 return;
1703 } else {
1704 /* 1696 /*
1705 * The controlling process has died, so we revert back to 1697 * It worked. Mark the vt to switch to and
1706 * normal operation. In this case, we'll also change back 1698 * return. The process needs to send us a
1707 * to KD_TEXT mode. I'm not sure if this is strictly correct 1699 * VT_RELDISP ioctl to complete the switch.
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1712 */ 1700 */
1713 reset_vc(vc); 1701 return;
1714 } 1702 }
1715 1703
1716 /* 1704 /*
1717 * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch... 1705 * The controlling process has died, so we revert back to
1706 * normal operation. In this case, we'll also change back
1707 * to KD_TEXT mode. I'm not sure if this is strictly correct
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1712 */
1713 reset_vc(vc);
1714
1715 /*
1716 * Fall through to normal (VT_AUTO) handling of the switch...
1718 */ 1717 */
1719 } 1718 }
1720 1719
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 8fc91a019620..f5b6d9fe4def 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -316,7 +316,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
316 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) 316 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
317 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); 317 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
318 } else { 318 } else {
319 pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1)); 319 u8 assoc_cpus = regs->nbsh & 0xf;
320
321 if (assoc_cpus > 0)
322 pr_cont(", core: %d", fls(assoc_cpus) - 1);
323
324 pr_cont("\n");
320 } 325 }
321 326
322 pr_emerg("%s.\n", EXT_ERR_MSG(xec)); 327 pr_emerg("%s.\n", EXT_ERR_MSG(xec));
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 20564f8cb0ec..406228f4a2a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -89,19 +89,21 @@ static struct backlight_ops nv50_bl_ops = {
89 89
90static int nouveau_nv40_backlight_init(struct drm_device *dev) 90static int nouveau_nv40_backlight_init(struct drm_device *dev)
91{ 91{
92 struct backlight_properties props;
92 struct drm_nouveau_private *dev_priv = dev->dev_private; 93 struct drm_nouveau_private *dev_priv = dev->dev_private;
93 struct backlight_device *bd; 94 struct backlight_device *bd;
94 95
95 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 96 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
96 return 0; 97 return 0;
97 98
99 memset(&props, 0, sizeof(struct backlight_properties));
100 props.max_brightness = 31;
98 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 101 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
99 &nv40_bl_ops); 102 &nv40_bl_ops, &props);
100 if (IS_ERR(bd)) 103 if (IS_ERR(bd))
101 return PTR_ERR(bd); 104 return PTR_ERR(bd);
102 105
103 dev_priv->backlight = bd; 106 dev_priv->backlight = bd;
104 bd->props.max_brightness = 31;
105 bd->props.brightness = nv40_get_intensity(bd); 107 bd->props.brightness = nv40_get_intensity(bd);
106 backlight_update_status(bd); 108 backlight_update_status(bd);
107 109
@@ -110,19 +112,21 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
110 112
111static int nouveau_nv50_backlight_init(struct drm_device *dev) 113static int nouveau_nv50_backlight_init(struct drm_device *dev)
112{ 114{
115 struct backlight_properties props;
113 struct drm_nouveau_private *dev_priv = dev->dev_private; 116 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 struct backlight_device *bd; 117 struct backlight_device *bd;
115 118
116 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) 119 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
117 return 0; 120 return 0;
118 121
122 memset(&props, 0, sizeof(struct backlight_properties));
123 props.max_brightness = 1025;
119 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 124 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
120 &nv50_bl_ops); 125 &nv50_bl_ops, &props);
121 if (IS_ERR(bd)) 126 if (IS_ERR(bd))
122 return PTR_ERR(bd); 127 return PTR_ERR(bd);
123 128
124 dev_priv->backlight = bd; 129 dev_priv->backlight = bd;
125 bd->props.max_brightness = 1025;
126 bd->props.brightness = nv50_get_intensity(bd); 130 bd->props.brightness = nv50_get_intensity(bd);
127 backlight_update_status(bd); 131 backlight_update_status(bd);
128 return 0; 132 return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 368fbb0c4ca6..2e2aa759d230 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1357,6 +1357,7 @@ static const struct hid_device_id hid_blacklist[] = {
1357 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, 1357 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
1358 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) }, 1358 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
1359 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, 1359 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
1360 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
1360 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, 1361 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
1361 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 1362 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
1362 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 1363 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index cd4ece6fdfb9..0c4e75573186 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -564,10 +564,10 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
564 struct hid_debug_list *list; 564 struct hid_debug_list *list;
565 565
566 list_for_each_entry(list, &hdev->debug_list, node) { 566 list_for_each_entry(list, &hdev->debug_list, node) {
567 for (i = 0; i <= strlen(buf); i++) 567 for (i = 0; i < strlen(buf); i++)
568 list->hid_debug_buf[(list->tail + i) % (HID_DEBUG_BUFSIZE - 1)] = 568 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
569 buf[i]; 569 buf[i];
570 list->tail = (list->tail + i) % (HID_DEBUG_BUFSIZE - 1); 570 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
571 } 571 }
572} 572}
573EXPORT_SYMBOL_GPL(hid_debug_event); 573EXPORT_SYMBOL_GPL(hid_debug_event);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 72c05f90553c..797e06470356 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -445,6 +445,7 @@
445 445
446#define USB_VENDOR_ID_UCLOGIC 0x5543 446#define USB_VENDOR_ID_UCLOGIC 0x5543
447#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 447#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
448#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
448 449
449#define USB_VENDOR_ID_VERNIER 0x08f7 450#define USB_VENDOR_ID_VERNIER 0x08f7
450#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 451#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 4a3a94f2b10c..c174b64c3810 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -353,7 +353,7 @@ static int magicmouse_probe(struct hid_device *hdev,
353 goto err_free; 353 goto err_free;
354 } 354 }
355 355
356 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 356 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
357 if (ret) { 357 if (ret) {
358 dev_err(&hdev->dev, "magicmouse hw start failed\n"); 358 dev_err(&hdev->dev, "magicmouse hw start failed\n");
359 goto err_free; 359 goto err_free;
@@ -409,8 +409,11 @@ err_free:
409 409
410static void magicmouse_remove(struct hid_device *hdev) 410static void magicmouse_remove(struct hid_device *hdev)
411{ 411{
412 struct magicmouse_sc *msc = hid_get_drvdata(hdev);
413
412 hid_hw_stop(hdev); 414 hid_hw_stop(hdev);
413 kfree(hid_get_drvdata(hdev)); 415 input_unregister_device(msc->input);
416 kfree(msc);
414} 417}
415 418
416static const struct hid_device_id magic_mice[] = { 419static const struct hid_device_id magic_mice[] = {
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 3234c729a895..edcc0c4247bb 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -140,6 +140,9 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
140 nd->reading_mt = 1; 140 nd->reading_mt = 1;
141 nd->first_contact_confidence = 0; 141 nd->first_contact_confidence = 0;
142 break; 142 break;
143 case HID_DG_TIPSWITCH:
144 /* Prevent emission of touch until validated */
145 return 1;
143 case HID_DG_CONFIDENCE: 146 case HID_DG_CONFIDENCE:
144 nd->confidence = value; 147 nd->confidence = value;
145 break; 148 break;
@@ -259,6 +262,7 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field,
259 BTN_TOOL_TRIPLETAP, 0); 262 BTN_TOOL_TRIPLETAP, 0);
260 input_report_key(input, 263 input_report_key(input,
261 BTN_TOOL_QUADTAP, 0); 264 BTN_TOOL_QUADTAP, 0);
265 input_report_key(input, BTN_TOUCH, 0);
262 } 266 }
263 break; 267 break;
264 268
@@ -308,13 +312,20 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
308 312
309 313
310 list_for_each_entry(hidinput, &hdev->inputs, list) { 314 list_for_each_entry(hidinput, &hdev->inputs, list) {
315 if (hidinput->report->maxfield < 1)
316 continue;
317
311 input = hidinput->input; 318 input = hidinput->input;
312 switch (hidinput->report->field[0]->application) { 319 switch (hidinput->report->field[0]->application) {
313 case HID_DG_PEN: 320 case HID_DG_PEN:
314 input->name = "N-Trig Pen"; 321 input->name = "N-Trig Pen";
315 break; 322 break;
316 case HID_DG_TOUCHSCREEN: 323 case HID_DG_TOUCHSCREEN:
324 /* These keys are redundant for fingers, clear them
325 * to prevent incorrect identification */
317 __clear_bit(BTN_TOOL_PEN, input->keybit); 326 __clear_bit(BTN_TOOL_PEN, input->keybit);
327 __clear_bit(BTN_TOOL_FINGER, input->keybit);
328 __clear_bit(BTN_0, input->keybit);
318 /* 329 /*
319 * A little something special to enable 330 * A little something special to enable
320 * two and three finger taps. 331 * two and three finger taps.
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 167ea746fb9c..c32f32c84ac8 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -251,6 +251,8 @@ static const struct hid_device_id tm_devices[] = {
251 .driver_data = (unsigned long)ff_rumble }, 251 .driver_data = (unsigned long)ff_rumble },
252 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */ 252 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */
253 .driver_data = (unsigned long)ff_rumble }, 253 .driver_data = (unsigned long)ff_rumble },
254 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653), /* RGT Force Feedback CLUTCH Raging Wheel */
255 .driver_data = (unsigned long)ff_joystick },
254 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */ 256 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */
255 .driver_data = (unsigned long)ff_joystick }, 257 .driver_data = (unsigned long)ff_joystick },
256 { } 258 { }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 7844280897d1..928943c7ce9a 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
63 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
64 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 64 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
65 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 65 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
66 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
66 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 67 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
67 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 68 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
68 69
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 1558bb7fc74d..f901957abc8b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -461,6 +461,7 @@ alloc_group_attrs(ssize_t (*show)(struct ib_port *,
461 element->attr.attr.mode = S_IRUGO; 461 element->attr.attr.mode = S_IRUGO;
462 element->attr.show = show; 462 element->attr.show = show;
463 element->index = i; 463 element->index = i;
464 sysfs_attr_init(&element->attr.attr);
464 465
465 tab_attr[i] = &element->attr.attr; 466 tab_attr[i] = &element->attr.attr;
466 } 467 }
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 71237f8f78f7..e78af36d3a0e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -613,7 +613,7 @@ static struct scsi_host_template iscsi_iser_sht = {
613 .cmd_per_lun = ISER_DEF_CMD_PER_LUN, 613 .cmd_per_lun = ISER_DEF_CMD_PER_LUN,
614 .eh_abort_handler = iscsi_eh_abort, 614 .eh_abort_handler = iscsi_eh_abort,
615 .eh_device_reset_handler= iscsi_eh_device_reset, 615 .eh_device_reset_handler= iscsi_eh_device_reset,
616 .eh_target_reset_handler= iscsi_eh_target_reset, 616 .eh_target_reset_handler = iscsi_eh_recover_target,
617 .target_alloc = iscsi_target_alloc, 617 .target_alloc = iscsi_target_alloc,
618 .use_clustering = DISABLE_CLUSTERING, 618 .use_clustering = DISABLE_CLUSTERING,
619 .proc_name = "iscsi_iser", 619 .proc_name = "iscsi_iser",
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index e5deb15cf40c..8d1d63a02b34 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -50,7 +50,7 @@ module_param(isdnprot, int, 0);
50 handler. 50 handler.
51*/ 51*/
52 52
53static int avma1cs_config(struct pcmcia_device *link); 53static int avma1cs_config(struct pcmcia_device *link) __devinit ;
54static void avma1cs_release(struct pcmcia_device *link); 54static void avma1cs_release(struct pcmcia_device *link);
55 55
56/* 56/*
@@ -59,7 +59,7 @@ static void avma1cs_release(struct pcmcia_device *link);
59 needed to manage one actual PCMCIA card. 59 needed to manage one actual PCMCIA card.
60*/ 60*/
61 61
62static void avma1cs_detach(struct pcmcia_device *p_dev); 62static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
63 63
64 64
65/* 65/*
@@ -99,7 +99,7 @@ typedef struct local_info_t {
99 99
100======================================================================*/ 100======================================================================*/
101 101
102static int avma1cs_probe(struct pcmcia_device *p_dev) 102static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
103{ 103{
104 local_info_t *local; 104 local_info_t *local;
105 105
@@ -140,7 +140,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
140 140
141======================================================================*/ 141======================================================================*/
142 142
143static void avma1cs_detach(struct pcmcia_device *link) 143static void __devexit avma1cs_detach(struct pcmcia_device *link)
144{ 144{
145 dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); 145 dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
146 avma1cs_release(link); 146 avma1cs_release(link);
@@ -174,7 +174,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
174} 174}
175 175
176 176
177static int avma1cs_config(struct pcmcia_device *link) 177static int __devinit avma1cs_config(struct pcmcia_device *link)
178{ 178{
179 local_info_t *dev; 179 local_info_t *dev;
180 int i; 180 int i;
@@ -282,7 +282,7 @@ static struct pcmcia_driver avma1cs_driver = {
282 .name = "avma1_cs", 282 .name = "avma1_cs",
283 }, 283 },
284 .probe = avma1cs_probe, 284 .probe = avma1cs_probe,
285 .remove = avma1cs_detach, 285 .remove = __devexit_p(avma1cs_detach),
286 .id_table = avma1cs_ids, 286 .id_table = avma1cs_ids,
287}; 287};
288 288
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9a30b1c9237..c9f2279e21f5 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -76,7 +76,7 @@ module_param(protocol, int, 0);
76 handler. 76 handler.
77*/ 77*/
78 78
79static int elsa_cs_config(struct pcmcia_device *link); 79static int elsa_cs_config(struct pcmcia_device *link) __devinit ;
80static void elsa_cs_release(struct pcmcia_device *link); 80static void elsa_cs_release(struct pcmcia_device *link);
81 81
82/* 82/*
@@ -85,7 +85,7 @@ static void elsa_cs_release(struct pcmcia_device *link);
85 needed to manage one actual PCMCIA card. 85 needed to manage one actual PCMCIA card.
86*/ 86*/
87 87
88static void elsa_cs_detach(struct pcmcia_device *p_dev); 88static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
89 89
90/* 90/*
91 A driver needs to provide a dev_node_t structure for each device 91 A driver needs to provide a dev_node_t structure for each device
@@ -121,7 +121,7 @@ typedef struct local_info_t {
121 121
122======================================================================*/ 122======================================================================*/
123 123
124static int elsa_cs_probe(struct pcmcia_device *link) 124static int __devinit elsa_cs_probe(struct pcmcia_device *link)
125{ 125{
126 local_info_t *local; 126 local_info_t *local;
127 127
@@ -166,7 +166,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
166 166
167======================================================================*/ 167======================================================================*/
168 168
169static void elsa_cs_detach(struct pcmcia_device *link) 169static void __devexit elsa_cs_detach(struct pcmcia_device *link)
170{ 170{
171 local_info_t *info = link->priv; 171 local_info_t *info = link->priv;
172 172
@@ -210,7 +210,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev,
210 return -ENODEV; 210 return -ENODEV;
211} 211}
212 212
213static int elsa_cs_config(struct pcmcia_device *link) 213static int __devinit elsa_cs_config(struct pcmcia_device *link)
214{ 214{
215 local_info_t *dev; 215 local_info_t *dev;
216 int i; 216 int i;
@@ -327,7 +327,7 @@ static struct pcmcia_driver elsa_cs_driver = {
327 .name = "elsa_cs", 327 .name = "elsa_cs",
328 }, 328 },
329 .probe = elsa_cs_probe, 329 .probe = elsa_cs_probe,
330 .remove = elsa_cs_detach, 330 .remove = __devexit_p(elsa_cs_detach),
331 .id_table = elsa_ids, 331 .id_table = elsa_ids,
332 .suspend = elsa_suspend, 332 .suspend = elsa_suspend,
333 .resume = elsa_resume, 333 .resume = elsa_resume,
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 7836ec3c7f86..71b3ddef03bb 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -76,7 +76,7 @@ module_param(protocol, int, 0);
76 event handler. 76 event handler.
77*/ 77*/
78 78
79static int sedlbauer_config(struct pcmcia_device *link); 79static int sedlbauer_config(struct pcmcia_device *link) __devinit ;
80static void sedlbauer_release(struct pcmcia_device *link); 80static void sedlbauer_release(struct pcmcia_device *link);
81 81
82/* 82/*
@@ -85,7 +85,7 @@ static void sedlbauer_release(struct pcmcia_device *link);
85 needed to manage one actual PCMCIA card. 85 needed to manage one actual PCMCIA card.
86*/ 86*/
87 87
88static void sedlbauer_detach(struct pcmcia_device *p_dev); 88static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
89 89
90/* 90/*
91 You'll also need to prototype all the functions that will actually 91 You'll also need to prototype all the functions that will actually
@@ -129,7 +129,7 @@ typedef struct local_info_t {
129 129
130======================================================================*/ 130======================================================================*/
131 131
132static int sedlbauer_probe(struct pcmcia_device *link) 132static int __devinit sedlbauer_probe(struct pcmcia_device *link)
133{ 133{
134 local_info_t *local; 134 local_info_t *local;
135 135
@@ -177,7 +177,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
177 177
178======================================================================*/ 178======================================================================*/
179 179
180static void sedlbauer_detach(struct pcmcia_device *link) 180static void __devexit sedlbauer_detach(struct pcmcia_device *link)
181{ 181{
182 dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); 182 dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
183 183
@@ -283,7 +283,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
283 283
284 284
285 285
286static int sedlbauer_config(struct pcmcia_device *link) 286static int __devinit sedlbauer_config(struct pcmcia_device *link)
287{ 287{
288 local_info_t *dev = link->priv; 288 local_info_t *dev = link->priv;
289 win_req_t *req; 289 win_req_t *req;
@@ -441,7 +441,7 @@ static struct pcmcia_driver sedlbauer_driver = {
441 .name = "sedlbauer_cs", 441 .name = "sedlbauer_cs",
442 }, 442 },
443 .probe = sedlbauer_probe, 443 .probe = sedlbauer_probe,
444 .remove = sedlbauer_detach, 444 .remove = __devexit_p(sedlbauer_detach),
445 .id_table = sedlbauer_ids, 445 .id_table = sedlbauer_ids,
446 .suspend = sedlbauer_suspend, 446 .suspend = sedlbauer_suspend,
447 .resume = sedlbauer_resume, 447 .resume = sedlbauer_resume,
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index b0c5976cbdb3..d010a0da8e19 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -57,7 +57,7 @@ module_param(protocol, int, 0);
57 handler. 57 handler.
58*/ 58*/
59 59
60static int teles_cs_config(struct pcmcia_device *link); 60static int teles_cs_config(struct pcmcia_device *link) __devinit ;
61static void teles_cs_release(struct pcmcia_device *link); 61static void teles_cs_release(struct pcmcia_device *link);
62 62
63/* 63/*
@@ -66,7 +66,7 @@ static void teles_cs_release(struct pcmcia_device *link);
66 needed to manage one actual PCMCIA card. 66 needed to manage one actual PCMCIA card.
67*/ 67*/
68 68
69static void teles_detach(struct pcmcia_device *p_dev); 69static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
70 70
71/* 71/*
72 A linked list of "instances" of the teles_cs device. Each actual 72 A linked list of "instances" of the teles_cs device. Each actual
@@ -112,7 +112,7 @@ typedef struct local_info_t {
112 112
113======================================================================*/ 113======================================================================*/
114 114
115static int teles_probe(struct pcmcia_device *link) 115static int __devinit teles_probe(struct pcmcia_device *link)
116{ 116{
117 local_info_t *local; 117 local_info_t *local;
118 118
@@ -156,7 +156,7 @@ static int teles_probe(struct pcmcia_device *link)
156 156
157======================================================================*/ 157======================================================================*/
158 158
159static void teles_detach(struct pcmcia_device *link) 159static void __devexit teles_detach(struct pcmcia_device *link)
160{ 160{
161 local_info_t *info = link->priv; 161 local_info_t *info = link->priv;
162 162
@@ -200,7 +200,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev,
200 return -ENODEV; 200 return -ENODEV;
201} 201}
202 202
203static int teles_cs_config(struct pcmcia_device *link) 203static int __devinit teles_cs_config(struct pcmcia_device *link)
204{ 204{
205 local_info_t *dev; 205 local_info_t *dev;
206 int i; 206 int i;
@@ -319,7 +319,7 @@ static struct pcmcia_driver teles_cs_driver = {
319 .name = "teles_cs", 319 .name = "teles_cs",
320 }, 320 },
321 .probe = teles_probe, 321 .probe = teles_probe,
322 .remove = teles_detach, 322 .remove = __devexit_p(teles_detach),
323 .id_table = teles_ids, 323 .id_table = teles_ids,
324 .suspend = teles_suspend, 324 .suspend = teles_suspend,
325 .resume = teles_resume, 325 .resume = teles_resume,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e0b64312e66a..505eb64c329c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -15,6 +15,8 @@ config LEDS_CLASS
15 This option enables the led sysfs class in /sys/class/leds. You'll 15 This option enables the led sysfs class in /sys/class/leds. You'll
16 need this to do anything useful with LEDs. If unsure, say N. 16 need this to do anything useful with LEDs. If unsure, say N.
17 17
18if LEDS_CLASS
19
18comment "LED drivers" 20comment "LED drivers"
19 21
20config LEDS_88PM860X 22config LEDS_88PM860X
@@ -26,73 +28,73 @@ config LEDS_88PM860X
26 28
27config LEDS_ATMEL_PWM 29config LEDS_ATMEL_PWM
28 tristate "LED Support using Atmel PWM outputs" 30 tristate "LED Support using Atmel PWM outputs"
29 depends on LEDS_CLASS && ATMEL_PWM 31 depends on ATMEL_PWM
30 help 32 help
31 This option enables support for LEDs driven using outputs 33 This option enables support for LEDs driven using outputs
32 of the dedicated PWM controller found on newer Atmel SOCs. 34 of the dedicated PWM controller found on newer Atmel SOCs.
33 35
34config LEDS_LOCOMO 36config LEDS_LOCOMO
35 tristate "LED Support for Locomo device" 37 tristate "LED Support for Locomo device"
36 depends on LEDS_CLASS && SHARP_LOCOMO 38 depends on SHARP_LOCOMO
37 help 39 help
38 This option enables support for the LEDs on Sharp Locomo. 40 This option enables support for the LEDs on Sharp Locomo.
39 Zaurus models SL-5500 and SL-5600. 41 Zaurus models SL-5500 and SL-5600.
40 42
41config LEDS_MIKROTIK_RB532 43config LEDS_MIKROTIK_RB532
42 tristate "LED Support for Mikrotik Routerboard 532" 44 tristate "LED Support for Mikrotik Routerboard 532"
43 depends on LEDS_CLASS && MIKROTIK_RB532 45 depends on MIKROTIK_RB532
44 help 46 help
45 This option enables support for the so called "User LED" of 47 This option enables support for the so called "User LED" of
46 Mikrotik's Routerboard 532. 48 Mikrotik's Routerboard 532.
47 49
48config LEDS_S3C24XX 50config LEDS_S3C24XX
49 tristate "LED Support for Samsung S3C24XX GPIO LEDs" 51 tristate "LED Support for Samsung S3C24XX GPIO LEDs"
50 depends on LEDS_CLASS && ARCH_S3C2410 52 depends on ARCH_S3C2410
51 help 53 help
52 This option enables support for LEDs connected to GPIO lines 54 This option enables support for LEDs connected to GPIO lines
53 on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440. 55 on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
54 56
55config LEDS_AMS_DELTA 57config LEDS_AMS_DELTA
56 tristate "LED Support for the Amstrad Delta (E3)" 58 tristate "LED Support for the Amstrad Delta (E3)"
57 depends on LEDS_CLASS && MACH_AMS_DELTA 59 depends on MACH_AMS_DELTA
58 help 60 help
59 This option enables support for the LEDs on Amstrad Delta (E3). 61 This option enables support for the LEDs on Amstrad Delta (E3).
60 62
61config LEDS_NET48XX 63config LEDS_NET48XX
62 tristate "LED Support for Soekris net48xx series Error LED" 64 tristate "LED Support for Soekris net48xx series Error LED"
63 depends on LEDS_CLASS && SCx200_GPIO 65 depends on SCx200_GPIO
64 help 66 help
65 This option enables support for the Soekris net4801 and net4826 error 67 This option enables support for the Soekris net4801 and net4826 error
66 LED. 68 LED.
67 69
68config LEDS_FSG 70config LEDS_FSG
69 tristate "LED Support for the Freecom FSG-3" 71 tristate "LED Support for the Freecom FSG-3"
70 depends on LEDS_CLASS && MACH_FSG 72 depends on MACH_FSG
71 help 73 help
72 This option enables support for the LEDs on the Freecom FSG-3. 74 This option enables support for the LEDs on the Freecom FSG-3.
73 75
74config LEDS_WRAP 76config LEDS_WRAP
75 tristate "LED Support for the WRAP series LEDs" 77 tristate "LED Support for the WRAP series LEDs"
76 depends on LEDS_CLASS && SCx200_GPIO 78 depends on SCx200_GPIO
77 help 79 help
78 This option enables support for the PCEngines WRAP programmable LEDs. 80 This option enables support for the PCEngines WRAP programmable LEDs.
79 81
80config LEDS_ALIX2 82config LEDS_ALIX2
81 tristate "LED Support for ALIX.2 and ALIX.3 series" 83 tristate "LED Support for ALIX.2 and ALIX.3 series"
82 depends on LEDS_CLASS && X86 && EXPERIMENTAL 84 depends on X86 && !GPIO_CS5535 && !CS5535_GPIO
83 help 85 help
84 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. 86 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
85 You have to set leds-alix2.force=1 for boards with Award BIOS. 87 You have to set leds-alix2.force=1 for boards with Award BIOS.
86 88
87config LEDS_H1940 89config LEDS_H1940
88 tristate "LED Support for iPAQ H1940 device" 90 tristate "LED Support for iPAQ H1940 device"
89 depends on LEDS_CLASS && ARCH_H1940 91 depends on ARCH_H1940
90 help 92 help
91 This option enables support for the LEDs on the h1940. 93 This option enables support for the LEDs on the h1940.
92 94
93config LEDS_COBALT_QUBE 95config LEDS_COBALT_QUBE
94 tristate "LED Support for the Cobalt Qube series front LED" 96 tristate "LED Support for the Cobalt Qube series front LED"
95 depends on LEDS_CLASS && MIPS_COBALT 97 depends on MIPS_COBALT
96 help 98 help
97 This option enables support for the front LED on Cobalt Qube series 99 This option enables support for the front LED on Cobalt Qube series
98 100
@@ -105,7 +107,7 @@ config LEDS_COBALT_RAQ
105 107
106config LEDS_SUNFIRE 108config LEDS_SUNFIRE
107 tristate "LED support for SunFire servers." 109 tristate "LED support for SunFire servers."
108 depends on LEDS_CLASS && SPARC64 110 depends on SPARC64
109 select LEDS_TRIGGERS 111 select LEDS_TRIGGERS
110 help 112 help
111 This option enables support for the Left, Middle, and Right 113 This option enables support for the Left, Middle, and Right
@@ -113,14 +115,14 @@ config LEDS_SUNFIRE
113 115
114config LEDS_HP6XX 116config LEDS_HP6XX
115 tristate "LED Support for the HP Jornada 6xx" 117 tristate "LED Support for the HP Jornada 6xx"
116 depends on LEDS_CLASS && SH_HP6XX 118 depends on SH_HP6XX
117 help 119 help
118 This option enables LED support for the handheld 120 This option enables LED support for the handheld
119 HP Jornada 620/660/680/690. 121 HP Jornada 620/660/680/690.
120 122
121config LEDS_PCA9532 123config LEDS_PCA9532
122 tristate "LED driver for PCA9532 dimmer" 124 tristate "LED driver for PCA9532 dimmer"
123 depends on LEDS_CLASS && I2C && INPUT && EXPERIMENTAL 125 depends on I2C && INPUT && EXPERIMENTAL
124 help 126 help
125 This option enables support for NXP pca9532 127 This option enables support for NXP pca9532
126 LED controller. It is generally only useful 128 LED controller. It is generally only useful
@@ -128,7 +130,7 @@ config LEDS_PCA9532
128 130
129config LEDS_GPIO 131config LEDS_GPIO
130 tristate "LED Support for GPIO connected LEDs" 132 tristate "LED Support for GPIO connected LEDs"
131 depends on LEDS_CLASS && GENERIC_GPIO 133 depends on GENERIC_GPIO
132 help 134 help
133 This option enables support for the LEDs connected to GPIO 135 This option enables support for the LEDs connected to GPIO
134 outputs. To be useful the particular board must have LEDs 136 outputs. To be useful the particular board must have LEDs
@@ -155,7 +157,7 @@ config LEDS_GPIO_OF
155 157
156config LEDS_LP3944 158config LEDS_LP3944
157 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" 159 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
158 depends on LEDS_CLASS && I2C 160 depends on I2C
159 help 161 help
160 This option enables support for LEDs connected to the National 162 This option enables support for LEDs connected to the National
161 Semiconductor LP3944 Lighting Management Unit (LMU) also known as 163 Semiconductor LP3944 Lighting Management Unit (LMU) also known as
@@ -166,7 +168,7 @@ config LEDS_LP3944
166 168
167config LEDS_CLEVO_MAIL 169config LEDS_CLEVO_MAIL
168 tristate "Mail LED on Clevo notebook" 170 tristate "Mail LED on Clevo notebook"
169 depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI 171 depends on X86 && SERIO_I8042 && DMI
170 help 172 help
171 This driver makes the mail LED accessible from userspace 173 This driver makes the mail LED accessible from userspace
172 programs through the leds subsystem. This LED have three 174 programs through the leds subsystem. This LED have three
@@ -196,7 +198,7 @@ config LEDS_CLEVO_MAIL
196 198
197config LEDS_PCA955X 199config LEDS_PCA955X
198 tristate "LED Support for PCA955x I2C chips" 200 tristate "LED Support for PCA955x I2C chips"
199 depends on LEDS_CLASS && I2C 201 depends on I2C
200 help 202 help
201 This option enables support for LEDs connected to PCA955x 203 This option enables support for LEDs connected to PCA955x
202 LED driver chips accessed via the I2C bus. Supported 204 LED driver chips accessed via the I2C bus. Supported
@@ -204,54 +206,54 @@ config LEDS_PCA955X
204 206
205config LEDS_WM831X_STATUS 207config LEDS_WM831X_STATUS
206 tristate "LED support for status LEDs on WM831x PMICs" 208 tristate "LED support for status LEDs on WM831x PMICs"
207 depends on LEDS_CLASS && MFD_WM831X 209 depends on MFD_WM831X
208 help 210 help
209 This option enables support for the status LEDs of the WM831x 211 This option enables support for the status LEDs of the WM831x
210 series of PMICs. 212 series of PMICs.
211 213
212config LEDS_WM8350 214config LEDS_WM8350
213 tristate "LED Support for WM8350 AudioPlus PMIC" 215 tristate "LED Support for WM8350 AudioPlus PMIC"
214 depends on LEDS_CLASS && MFD_WM8350 216 depends on MFD_WM8350
215 help 217 help
216 This option enables support for LEDs driven by the Wolfson 218 This option enables support for LEDs driven by the Wolfson
217 Microelectronics WM8350 AudioPlus PMIC. 219 Microelectronics WM8350 AudioPlus PMIC.
218 220
219config LEDS_DA903X 221config LEDS_DA903X
220 tristate "LED Support for DA9030/DA9034 PMIC" 222 tristate "LED Support for DA9030/DA9034 PMIC"
221 depends on LEDS_CLASS && PMIC_DA903X 223 depends on PMIC_DA903X
222 help 224 help
223 This option enables support for on-chip LED drivers found 225 This option enables support for on-chip LED drivers found
224 on Dialog Semiconductor DA9030/DA9034 PMICs. 226 on Dialog Semiconductor DA9030/DA9034 PMICs.
225 227
226config LEDS_DAC124S085 228config LEDS_DAC124S085
227 tristate "LED Support for DAC124S085 SPI DAC" 229 tristate "LED Support for DAC124S085 SPI DAC"
228 depends on LEDS_CLASS && SPI 230 depends on SPI
229 help 231 help
230 This option enables support for DAC124S085 SPI DAC from NatSemi, 232 This option enables support for DAC124S085 SPI DAC from NatSemi,
231 which can be used to control up to four LEDs. 233 which can be used to control up to four LEDs.
232 234
233config LEDS_PWM 235config LEDS_PWM
234 tristate "PWM driven LED Support" 236 tristate "PWM driven LED Support"
235 depends on LEDS_CLASS && HAVE_PWM 237 depends on HAVE_PWM
236 help 238 help
237 This option enables support for pwm driven LEDs 239 This option enables support for pwm driven LEDs
238 240
239config LEDS_REGULATOR 241config LEDS_REGULATOR
240 tristate "REGULATOR driven LED support" 242 tristate "REGULATOR driven LED support"
241 depends on LEDS_CLASS && REGULATOR 243 depends on REGULATOR
242 help 244 help
243 This option enables support for regulator driven LEDs. 245 This option enables support for regulator driven LEDs.
244 246
245config LEDS_BD2802 247config LEDS_BD2802
246 tristate "LED driver for BD2802 RGB LED" 248 tristate "LED driver for BD2802 RGB LED"
247 depends on LEDS_CLASS && I2C 249 depends on I2C
248 help 250 help
249 This option enables support for BD2802GU RGB LED driver chips 251 This option enables support for BD2802GU RGB LED driver chips
250 accessed via the I2C bus. 252 accessed via the I2C bus.
251 253
252config LEDS_INTEL_SS4200 254config LEDS_INTEL_SS4200
253 tristate "LED driver for Intel NAS SS4200 series" 255 tristate "LED driver for Intel NAS SS4200 series"
254 depends on LEDS_CLASS && PCI && DMI 256 depends on PCI && DMI
255 help 257 help
256 This option enables support for the Intel SS4200 series of 258 This option enables support for the Intel SS4200 series of
257 Network Attached Storage servers. You may control the hard 259 Network Attached Storage servers. You may control the hard
@@ -260,7 +262,7 @@ config LEDS_INTEL_SS4200
260 262
261config LEDS_LT3593 263config LEDS_LT3593
262 tristate "LED driver for LT3593 controllers" 264 tristate "LED driver for LT3593 controllers"
263 depends on LEDS_CLASS && GENERIC_GPIO 265 depends on GENERIC_GPIO
264 help 266 help
265 This option enables support for LEDs driven by a Linear Technology 267 This option enables support for LEDs driven by a Linear Technology
266 LT3593 controller. This controller uses a special one-wire pulse 268 LT3593 controller. This controller uses a special one-wire pulse
@@ -268,7 +270,7 @@ config LEDS_LT3593
268 270
269config LEDS_ADP5520 271config LEDS_ADP5520
270 tristate "LED Support for ADP5520/ADP5501 PMIC" 272 tristate "LED Support for ADP5520/ADP5501 PMIC"
271 depends on LEDS_CLASS && PMIC_ADP5520 273 depends on PMIC_ADP5520
272 help 274 help
273 This option enables support for on-chip LED drivers found 275 This option enables support for on-chip LED drivers found
274 on Analog Devices ADP5520/ADP5501 PMICs. 276 on Analog Devices ADP5520/ADP5501 PMICs.
@@ -276,7 +278,12 @@ config LEDS_ADP5520
276 To compile this driver as a module, choose M here: the module will 278 To compile this driver as a module, choose M here: the module will
277 be called leds-adp5520. 279 be called leds-adp5520.
278 280
279comment "LED Triggers" 281config LEDS_DELL_NETBOOKS
282 tristate "External LED on Dell Business Netbooks"
283 depends on X86 && ACPI_WMI
284 help
285 This adds support for the Latitude 2100 and similar
286 notebooks that have an external LED.
280 287
281config LEDS_TRIGGERS 288config LEDS_TRIGGERS
282 bool "LED Trigger support" 289 bool "LED Trigger support"
@@ -285,9 +292,12 @@ config LEDS_TRIGGERS
285 These triggers allow kernel events to drive the LEDs and can 292 These triggers allow kernel events to drive the LEDs and can
286 be configured via sysfs. If unsure, say Y. 293 be configured via sysfs. If unsure, say Y.
287 294
295if LEDS_TRIGGERS
296
297comment "LED Triggers"
298
288config LEDS_TRIGGER_TIMER 299config LEDS_TRIGGER_TIMER
289 tristate "LED Timer Trigger" 300 tristate "LED Timer Trigger"
290 depends on LEDS_TRIGGERS
291 help 301 help
292 This allows LEDs to be controlled by a programmable timer 302 This allows LEDs to be controlled by a programmable timer
293 via sysfs. Some LED hardware can be programmed to start 303 via sysfs. Some LED hardware can be programmed to start
@@ -298,14 +308,13 @@ config LEDS_TRIGGER_TIMER
298 308
299config LEDS_TRIGGER_IDE_DISK 309config LEDS_TRIGGER_IDE_DISK
300 bool "LED IDE Disk Trigger" 310 bool "LED IDE Disk Trigger"
301 depends on LEDS_TRIGGERS && IDE_GD_ATA 311 depends on IDE_GD_ATA
302 help 312 help
303 This allows LEDs to be controlled by IDE disk activity. 313 This allows LEDs to be controlled by IDE disk activity.
304 If unsure, say Y. 314 If unsure, say Y.
305 315
306config LEDS_TRIGGER_HEARTBEAT 316config LEDS_TRIGGER_HEARTBEAT
307 tristate "LED Heartbeat Trigger" 317 tristate "LED Heartbeat Trigger"
308 depends on LEDS_TRIGGERS
309 help 318 help
310 This allows LEDs to be controlled by a CPU load average. 319 This allows LEDs to be controlled by a CPU load average.
311 The flash frequency is a hyperbolic function of the 1-minute 320 The flash frequency is a hyperbolic function of the 1-minute
@@ -314,7 +323,6 @@ config LEDS_TRIGGER_HEARTBEAT
314 323
315config LEDS_TRIGGER_BACKLIGHT 324config LEDS_TRIGGER_BACKLIGHT
316 tristate "LED backlight Trigger" 325 tristate "LED backlight Trigger"
317 depends on LEDS_TRIGGERS
318 help 326 help
319 This allows LEDs to be controlled as a backlight device: they 327 This allows LEDs to be controlled as a backlight device: they
320 turn off and on when the display is blanked and unblanked. 328 turn off and on when the display is blanked and unblanked.
@@ -323,7 +331,6 @@ config LEDS_TRIGGER_BACKLIGHT
323 331
324config LEDS_TRIGGER_GPIO 332config LEDS_TRIGGER_GPIO
325 tristate "LED GPIO Trigger" 333 tristate "LED GPIO Trigger"
326 depends on LEDS_TRIGGERS
327 depends on GPIOLIB 334 depends on GPIOLIB
328 help 335 help
329 This allows LEDs to be controlled by gpio events. It's good 336 This allows LEDs to be controlled by gpio events. It's good
@@ -336,7 +343,6 @@ config LEDS_TRIGGER_GPIO
336 343
337config LEDS_TRIGGER_DEFAULT_ON 344config LEDS_TRIGGER_DEFAULT_ON
338 tristate "LED Default ON Trigger" 345 tristate "LED Default ON Trigger"
339 depends on LEDS_TRIGGERS
340 help 346 help
341 This allows LEDs to be initialised in the ON state. 347 This allows LEDs to be initialised in the ON state.
342 If unsure, say Y. 348 If unsure, say Y.
@@ -344,4 +350,8 @@ config LEDS_TRIGGER_DEFAULT_ON
344comment "iptables trigger is under Netfilter config (LED target)" 350comment "iptables trigger is under Netfilter config (LED target)"
345 depends on LEDS_TRIGGERS 351 depends on LEDS_TRIGGERS
346 352
353endif # LEDS_TRIGGERS
354
355endif # LEDS_CLASS
356
347endif # NEW_LEDS 357endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d76fb32b77c0..0cd8b9957380 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
34obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o 34obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
35obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o 35obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
36obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o 36obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
37obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
37 38
38# LED SPI Drivers 39# LED SPI Drivers
39obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 40obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
new file mode 100644
index 000000000000..ee310891fff8
--- /dev/null
+++ b/drivers/leds/dell-led.c
@@ -0,0 +1,200 @@
1/*
2 * dell_led.c - Dell LED Driver
3 *
4 * Copyright (C) 2010 Dell Inc.
5 * Louis Davis <louis_davis@dell.com>
6 * Jim Dailey <jim_dailey@dell.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/acpi.h>
15#include <linux/leds.h>
16
17MODULE_AUTHOR("Louis Davis/Jim Dailey");
18MODULE_DESCRIPTION("Dell LED Control Driver");
19MODULE_LICENSE("GPL");
20
21#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396"
22MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
23
24/* Error Result Codes: */
25#define INVALID_DEVICE_ID 250
26#define INVALID_PARAMETER 251
27#define INVALID_BUFFER 252
28#define INTERFACE_ERROR 253
29#define UNSUPPORTED_COMMAND 254
30#define UNSPECIFIED_ERROR 255
31
32/* Device ID */
33#define DEVICE_ID_PANEL_BACK 1
34
35/* LED Commands */
36#define CMD_LED_ON 16
37#define CMD_LED_OFF 17
38#define CMD_LED_BLINK 18
39
40struct bios_args {
41 unsigned char length;
42 unsigned char result_code;
43 unsigned char device_id;
44 unsigned char command;
45 unsigned char on_time;
46 unsigned char off_time;
47};
48
49static int dell_led_perform_fn(u8 length,
50 u8 result_code,
51 u8 device_id,
52 u8 command,
53 u8 on_time,
54 u8 off_time)
55{
56 struct bios_args *bios_return;
57 u8 return_code;
58 union acpi_object *obj;
59 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
60 struct acpi_buffer input;
61 acpi_status status;
62
63 struct bios_args args;
64 args.length = length;
65 args.result_code = result_code;
66 args.device_id = device_id;
67 args.command = command;
68 args.on_time = on_time;
69 args.off_time = off_time;
70
71 input.length = sizeof(struct bios_args);
72 input.pointer = &args;
73
74 status = wmi_evaluate_method(DELL_LED_BIOS_GUID,
75 1,
76 1,
77 &input,
78 &output);
79
80 if (ACPI_FAILURE(status))
81 return status;
82
83 obj = output.pointer;
84
85 if (!obj)
86 return -EINVAL;
87 else if (obj->type != ACPI_TYPE_BUFFER) {
88 kfree(obj);
89 return -EINVAL;
90 }
91
92 bios_return = ((struct bios_args *)obj->buffer.pointer);
93 return_code = bios_return->result_code;
94
95 kfree(obj);
96
97 return return_code;
98}
99
100static int led_on(void)
101{
102 return dell_led_perform_fn(3, /* Length of command */
103 INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
104 DEVICE_ID_PANEL_BACK, /* Device ID */
105 CMD_LED_ON, /* Command */
106 0, /* not used */
107 0); /* not used */
108}
109
110static int led_off(void)
111{
112 return dell_led_perform_fn(3, /* Length of command */
113 INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
114 DEVICE_ID_PANEL_BACK, /* Device ID */
115 CMD_LED_OFF, /* Command */
116 0, /* not used */
117 0); /* not used */
118}
119
120static int led_blink(unsigned char on_eighths,
121 unsigned char off_eighths)
122{
123 return dell_led_perform_fn(5, /* Length of command */
124 INTERFACE_ERROR, /* Init to INTERFACE_ERROR */
125 DEVICE_ID_PANEL_BACK, /* Device ID */
126 CMD_LED_BLINK, /* Command */
127 on_eighths, /* blink on in eigths of a second */
128 off_eighths); /* blink off in eights of a second */
129}
130
131static void dell_led_set(struct led_classdev *led_cdev,
132 enum led_brightness value)
133{
134 if (value == LED_OFF)
135 led_off();
136 else
137 led_on();
138}
139
140static int dell_led_blink(struct led_classdev *led_cdev,
141 unsigned long *delay_on,
142 unsigned long *delay_off)
143{
144 unsigned long on_eighths;
145 unsigned long off_eighths;
146
147 /* The Dell LED delay is based on 125ms intervals.
148 Need to round up to next interval. */
149
150 on_eighths = (*delay_on + 124) / 125;
151 if (0 == on_eighths)
152 on_eighths = 1;
153 if (on_eighths > 255)
154 on_eighths = 255;
155 *delay_on = on_eighths * 125;
156
157 off_eighths = (*delay_off + 124) / 125;
158 if (0 == off_eighths)
159 off_eighths = 1;
160 if (off_eighths > 255)
161 off_eighths = 255;
162 *delay_off = off_eighths * 125;
163
164 led_blink(on_eighths, off_eighths);
165
166 return 0;
167}
168
169static struct led_classdev dell_led = {
170 .name = "dell::lid",
171 .brightness = LED_OFF,
172 .max_brightness = 1,
173 .brightness_set = dell_led_set,
174 .blink_set = dell_led_blink,
175 .flags = LED_CORE_SUSPENDRESUME,
176};
177
178static int __init dell_led_init(void)
179{
180 int error = 0;
181
182 if (!wmi_has_guid(DELL_LED_BIOS_GUID))
183 return -ENODEV;
184
185 error = led_off();
186 if (error != 0)
187 return -ENODEV;
188
189 return led_classdev_register(NULL, &dell_led);
190}
191
192static void __exit dell_led_exit(void)
193{
194 led_classdev_unregister(&dell_led);
195
196 led_off();
197}
198
199module_init(dell_led_init);
200module_exit(dell_led_exit);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 782f95822eab..69e7d86a5143 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -72,11 +72,14 @@ static ssize_t led_max_brightness_show(struct device *dev,
72 return sprintf(buf, "%u\n", led_cdev->max_brightness); 72 return sprintf(buf, "%u\n", led_cdev->max_brightness);
73} 73}
74 74
75static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store); 75static struct device_attribute led_class_attrs[] = {
76static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL); 76 __ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
77 __ATTR(max_brightness, 0644, led_max_brightness_show, NULL),
77#ifdef CONFIG_LEDS_TRIGGERS 78#ifdef CONFIG_LEDS_TRIGGERS
78static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); 79 __ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
79#endif 80#endif
81 __ATTR_NULL,
82};
80 83
81/** 84/**
82 * led_classdev_suspend - suspend an led_classdev. 85 * led_classdev_suspend - suspend an led_classdev.
@@ -127,18 +130,11 @@ static int led_resume(struct device *dev)
127 */ 130 */
128int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) 131int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
129{ 132{
130 int rc;
131
132 led_cdev->dev = device_create(leds_class, parent, 0, led_cdev, 133 led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
133 "%s", led_cdev->name); 134 "%s", led_cdev->name);
134 if (IS_ERR(led_cdev->dev)) 135 if (IS_ERR(led_cdev->dev))
135 return PTR_ERR(led_cdev->dev); 136 return PTR_ERR(led_cdev->dev);
136 137
137 /* register the attributes */
138 rc = device_create_file(led_cdev->dev, &dev_attr_brightness);
139 if (rc)
140 goto err_out;
141
142#ifdef CONFIG_LEDS_TRIGGERS 138#ifdef CONFIG_LEDS_TRIGGERS
143 init_rwsem(&led_cdev->trigger_lock); 139 init_rwsem(&led_cdev->trigger_lock);
144#endif 140#endif
@@ -150,36 +146,18 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
150 if (!led_cdev->max_brightness) 146 if (!led_cdev->max_brightness)
151 led_cdev->max_brightness = LED_FULL; 147 led_cdev->max_brightness = LED_FULL;
152 148
153 rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness);
154 if (rc)
155 goto err_out_attr_max;
156
157 led_update_brightness(led_cdev); 149 led_update_brightness(led_cdev);
158 150
159#ifdef CONFIG_LEDS_TRIGGERS 151#ifdef CONFIG_LEDS_TRIGGERS
160 rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
161 if (rc)
162 goto err_out_led_list;
163
164 led_trigger_set_default(led_cdev); 152 led_trigger_set_default(led_cdev);
165#endif 153#endif
166 154
167 printk(KERN_INFO "Registered led device: %s\n", 155 printk(KERN_DEBUG "Registered led device: %s\n",
168 led_cdev->name); 156 led_cdev->name);
169 157
170 return 0; 158 return 0;
171
172#ifdef CONFIG_LEDS_TRIGGERS
173err_out_led_list:
174 device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
175#endif
176err_out_attr_max:
177 device_remove_file(led_cdev->dev, &dev_attr_brightness);
178 list_del(&led_cdev->node);
179err_out:
180 device_unregister(led_cdev->dev);
181 return rc;
182} 159}
160
183EXPORT_SYMBOL_GPL(led_classdev_register); 161EXPORT_SYMBOL_GPL(led_classdev_register);
184 162
185/** 163/**
@@ -190,10 +168,7 @@ EXPORT_SYMBOL_GPL(led_classdev_register);
190 */ 168 */
191void led_classdev_unregister(struct led_classdev *led_cdev) 169void led_classdev_unregister(struct led_classdev *led_cdev)
192{ 170{
193 device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
194 device_remove_file(led_cdev->dev, &dev_attr_brightness);
195#ifdef CONFIG_LEDS_TRIGGERS 171#ifdef CONFIG_LEDS_TRIGGERS
196 device_remove_file(led_cdev->dev, &dev_attr_trigger);
197 down_write(&led_cdev->trigger_lock); 172 down_write(&led_cdev->trigger_lock);
198 if (led_cdev->trigger) 173 if (led_cdev->trigger)
199 led_trigger_set(led_cdev, NULL); 174 led_trigger_set(led_cdev, NULL);
@@ -215,6 +190,7 @@ static int __init leds_init(void)
215 return PTR_ERR(leds_class); 190 return PTR_ERR(leds_class);
216 leds_class->suspend = led_suspend; 191 leds_class->suspend = led_suspend;
217 leds_class->resume = led_resume; 192 leds_class->resume = led_resume;
193 leds_class->dev_attrs = led_class_attrs;
218 return 0; 194 return 0;
219} 195}
220 196
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index e5225d28f392..0823e2622e8c 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
211 const struct of_device_id *match) 211 const struct of_device_id *match)
212{ 212{
213 struct device_node *np = ofdev->node, *child; 213 struct device_node *np = ofdev->node, *child;
214 struct gpio_led led;
215 struct gpio_led_of_platform_data *pdata; 214 struct gpio_led_of_platform_data *pdata;
216 int count = 0, ret; 215 int count = 0, ret;
217 216
@@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
226 if (!pdata) 225 if (!pdata)
227 return -ENOMEM; 226 return -ENOMEM;
228 227
229 memset(&led, 0, sizeof(led));
230 for_each_child_of_node(np, child) { 228 for_each_child_of_node(np, child) {
229 struct gpio_led led = {};
231 enum of_gpio_flags flags; 230 enum of_gpio_flags flags;
232 const char *state; 231 const char *state;
233 232
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 97f04984c1ca..51477ec71391 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
63/* 63/*
64 * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives. 64 * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
65 */ 65 */
66static struct pci_device_id ich7_lpc_pci_id[] = 66static const struct pci_device_id ich7_lpc_pci_id[] =
67{ 67{
68 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) }, 68 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
69 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) }, 69 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 4f3c4479c16a..1cec02f6c431 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -144,6 +144,7 @@ void pmu_backlight_set_sleep(int sleep)
144 144
145void __init pmu_backlight_init() 145void __init pmu_backlight_init()
146{ 146{
147 struct backlight_properties props;
147 struct backlight_device *bd; 148 struct backlight_device *bd;
148 char name[10]; 149 char name[10];
149 int level, autosave; 150 int level, autosave;
@@ -161,13 +162,15 @@ void __init pmu_backlight_init()
161 162
162 snprintf(name, sizeof(name), "pmubl"); 163 snprintf(name, sizeof(name), "pmubl");
163 164
164 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data); 165 memset(&props, 0, sizeof(struct backlight_properties));
166 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
167 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data,
168 &props);
165 if (IS_ERR(bd)) { 169 if (IS_ERR(bd)) {
166 printk(KERN_ERR "PMU Backlight registration failed\n"); 170 printk(KERN_ERR "PMU Backlight registration failed\n");
167 return; 171 return;
168 } 172 }
169 uses_pmu_bl = 1; 173 uses_pmu_bl = 1;
170 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
171 pmu_backlight_init_curve(0x7F, 0x46, 0x0E); 174 pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
172 175
173 level = bd->props.max_brightness; 176 level = bd->props.max_brightness;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index af2d39d603c7..bb2a23159b21 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
172 disk_stack_limits(mddev->gendisk, rdev->bdev, 172 disk_stack_limits(mddev->gendisk, rdev->bdev,
173 rdev->data_offset << 9); 173 rdev->data_offset << 9);
174 /* as we don't honour merge_bvec_fn, we must never risk 174 /* as we don't honour merge_bvec_fn, we must never risk
175 * violating it, so limit ->max_sector to one PAGE, as 175 * violating it, so limit max_segments to 1 lying within
176 * a one page request is never in violation. 176 * a single page.
177 */ 177 */
178 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 178 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
179 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 179 blk_queue_max_segments(mddev->queue, 1);
180 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 180 blk_queue_segment_boundary(mddev->queue,
181 PAGE_CACHE_SIZE - 1);
182 }
181 183
182 conf->array_sectors += rdev->sectors; 184 conf->array_sectors += rdev->sectors;
183 cnt++; 185 cnt++;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4b323f45ad74..5558ebc705c8 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
301 rdev->data_offset << 9); 301 rdev->data_offset << 9);
302 302
303 /* as we don't honour merge_bvec_fn, we must never risk 303 /* as we don't honour merge_bvec_fn, we must never risk
304 * violating it, so limit ->max_sector to one PAGE, as 304 * violating it, so limit ->max_segments to one, lying
305 * a one page request is never in violation. 305 * within a single page.
306 * (Note: it is very unlikely that a device with 306 * (Note: it is very unlikely that a device with
307 * merge_bvec_fn will be involved in multipath.) 307 * merge_bvec_fn will be involved in multipath.)
308 */ 308 */
309 if (q->merge_bvec_fn && 309 if (q->merge_bvec_fn) {
310 queue_max_sectors(q) > (PAGE_SIZE>>9)) 310 blk_queue_max_segments(mddev->queue, 1);
311 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 311 blk_queue_segment_boundary(mddev->queue,
312 PAGE_CACHE_SIZE - 1);
313 }
312 314
313 conf->working_disks++; 315 conf->working_disks++;
314 mddev->degraded--; 316 mddev->degraded--;
@@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
476 /* as we don't honour merge_bvec_fn, we must never risk 478 /* as we don't honour merge_bvec_fn, we must never risk
477 * violating it, not that we ever expect a device with 479 * violating it, not that we ever expect a device with
478 * a merge_bvec_fn to be involved in multipath */ 480 * a merge_bvec_fn to be involved in multipath */
479 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 481 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
480 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 482 blk_queue_max_segments(mddev->queue, 1);
481 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 483 blk_queue_segment_boundary(mddev->queue,
484 PAGE_CACHE_SIZE - 1);
485 }
482 486
483 if (!test_bit(Faulty, &rdev->flags)) 487 if (!test_bit(Faulty, &rdev->flags))
484 conf->working_disks++; 488 conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a1f7147b757f..377cf2a3c333 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
176 disk_stack_limits(mddev->gendisk, rdev1->bdev, 176 disk_stack_limits(mddev->gendisk, rdev1->bdev,
177 rdev1->data_offset << 9); 177 rdev1->data_offset << 9);
178 /* as we don't honour merge_bvec_fn, we must never risk 178 /* as we don't honour merge_bvec_fn, we must never risk
179 * violating it, so limit ->max_sector to one PAGE, as 179 * violating it, so limit ->max_segments to 1, lying within
180 * a one page request is never in violation. 180 * a single page.
181 */ 181 */
182 182
183 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && 183 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
184 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 184 blk_queue_max_segments(mddev->queue, 1);
185 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 185 blk_queue_segment_boundary(mddev->queue,
186 186 PAGE_CACHE_SIZE - 1);
187 }
187 if (!smallest || (rdev1->sectors < smallest->sectors)) 188 if (!smallest || (rdev1->sectors < smallest->sectors))
188 smallest = rdev1; 189 smallest = rdev1;
189 cnt++; 190 cnt++;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5a06122abd3b..f741f77eeb2b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1152,13 +1152,17 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1152 1152
1153 disk_stack_limits(mddev->gendisk, rdev->bdev, 1153 disk_stack_limits(mddev->gendisk, rdev->bdev,
1154 rdev->data_offset << 9); 1154 rdev->data_offset << 9);
1155 /* as we don't honour merge_bvec_fn, we must never risk 1155 /* as we don't honour merge_bvec_fn, we must
1156 * violating it, so limit ->max_sector to one PAGE, as 1156 * never risk violating it, so limit
1157 * a one page request is never in violation. 1157 * ->max_segments to one lying with a single
1158 * page, as a one page request is never in
1159 * violation.
1158 */ 1160 */
1159 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1161 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1160 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 1162 blk_queue_max_segments(mddev->queue, 1);
1161 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 1163 blk_queue_segment_boundary(mddev->queue,
1164 PAGE_CACHE_SIZE - 1);
1165 }
1162 1166
1163 p->head_position = 0; 1167 p->head_position = 0;
1164 rdev->raid_disk = mirror; 1168 rdev->raid_disk = mirror;
@@ -2098,12 +2102,14 @@ static int run(mddev_t *mddev)
2098 disk_stack_limits(mddev->gendisk, rdev->bdev, 2102 disk_stack_limits(mddev->gendisk, rdev->bdev,
2099 rdev->data_offset << 9); 2103 rdev->data_offset << 9);
2100 /* as we don't honour merge_bvec_fn, we must never risk 2104 /* as we don't honour merge_bvec_fn, we must never risk
2101 * violating it, so limit ->max_sector to one PAGE, as 2105 * violating it, so limit ->max_segments to 1 lying within
2102 * a one page request is never in violation. 2106 * a single page, as a one page request is never in violation.
2103 */ 2107 */
2104 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2108 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2105 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 2109 blk_queue_max_segments(mddev->queue, 1);
2106 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 2110 blk_queue_segment_boundary(mddev->queue,
2111 PAGE_CACHE_SIZE - 1);
2112 }
2107 } 2113 }
2108 2114
2109 mddev->degraded = 0; 2115 mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7584f9ab9bcf..b4ba41ecbd20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1155 1155
1156 disk_stack_limits(mddev->gendisk, rdev->bdev, 1156 disk_stack_limits(mddev->gendisk, rdev->bdev,
1157 rdev->data_offset << 9); 1157 rdev->data_offset << 9);
1158 /* as we don't honour merge_bvec_fn, we must never risk 1158 /* as we don't honour merge_bvec_fn, we must
1159 * violating it, so limit ->max_sector to one PAGE, as 1159 * never risk violating it, so limit
1160 * a one page request is never in violation. 1160 * ->max_segments to one lying with a single
1161 * page, as a one page request is never in
1162 * violation.
1161 */ 1163 */
1162 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1164 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1163 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 1165 blk_queue_max_segments(mddev->queue, 1);
1164 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 1166 blk_queue_segment_boundary(mddev->queue,
1167 PAGE_CACHE_SIZE - 1);
1168 }
1165 1169
1166 p->head_position = 0; 1170 p->head_position = 0;
1167 rdev->raid_disk = mirror; 1171 rdev->raid_disk = mirror;
@@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev)
2255 disk_stack_limits(mddev->gendisk, rdev->bdev, 2259 disk_stack_limits(mddev->gendisk, rdev->bdev,
2256 rdev->data_offset << 9); 2260 rdev->data_offset << 9);
2257 /* as we don't honour merge_bvec_fn, we must never risk 2261 /* as we don't honour merge_bvec_fn, we must never risk
2258 * violating it, so limit ->max_sector to one PAGE, as 2262 * violating it, so limit max_segments to 1 lying
2259 * a one page request is never in violation. 2263 * within a single page.
2260 */ 2264 */
2261 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2265 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2262 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 2266 blk_queue_max_segments(mddev->queue, 1);
2263 blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); 2267 blk_queue_segment_boundary(mddev->queue,
2268 PAGE_CACHE_SIZE - 1);
2269 }
2264 2270
2265 disk->head_position = 0; 2271 disk->head_position = 0;
2266 } 2272 }
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/mtd/maps/omap_nor.c
+++ /dev/null
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1157d5679e66..42e5ea49e975 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -457,7 +457,7 @@ config MTD_NAND_NOMADIK
457 457
458config MTD_NAND_SH_FLCTL 458config MTD_NAND_SH_FLCTL
459 tristate "Support for NAND on Renesas SuperH FLCTL" 459 tristate "Support for NAND on Renesas SuperH FLCTL"
460 depends on MTD_NAND && SUPERH 460 depends on MTD_NAND && (SUPERH || ARCH_SHMOBILE)
461 help 461 help
462 Several Renesas SuperH CPU has FLCTL. This option enables support 462 Several Renesas SuperH CPU has FLCTL. This option enables support
463 for NAND Flash using FLCTL. 463 for NAND Flash using FLCTL.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 20e2dec1d534..65db201fd77e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2583,6 +2583,31 @@ config CHELSIO_T3
2583 To compile this driver as a module, choose M here: the module 2583 To compile this driver as a module, choose M here: the module
2584 will be called cxgb3. 2584 will be called cxgb3.
2585 2585
2586config CHELSIO_T4_DEPENDS
2587 tristate
2588 depends on PCI && INET
2589 default y
2590
2591config CHELSIO_T4
2592 tristate "Chelsio Communications T4 Ethernet support"
2593 depends on CHELSIO_T4_DEPENDS
2594 select FW_LOADER
2595 select MDIO
2596 help
2597 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
2598 adapters.
2599
2600 For general information about Chelsio and our products, visit
2601 our website at <http://www.chelsio.com>.
2602
2603 For customer support, please visit our customer support page at
2604 <http://www.chelsio.com/support.htm>.
2605
2606 Please send feedback to <linux-bugs@chelsio.com>.
2607
2608 To compile this driver as a module choose M here; the module
2609 will be called cxgb4.
2610
2586config EHEA 2611config EHEA
2587 tristate "eHEA Ethernet support" 2612 tristate "eHEA Ethernet support"
2588 depends on IBMEBUS && INET && SPARSEMEM 2613 depends on IBMEBUS && INET && SPARSEMEM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f8444f439a65..ebf80b983063 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/
19obj-$(CONFIG_IP1000) += ipg.o 19obj-$(CONFIG_IP1000) += ipg.o
20obj-$(CONFIG_CHELSIO_T1) += chelsio/ 20obj-$(CONFIG_CHELSIO_T1) += chelsio/
21obj-$(CONFIG_CHELSIO_T3) += cxgb3/ 21obj-$(CONFIG_CHELSIO_T3) += cxgb3/
22obj-$(CONFIG_CHELSIO_T4) += cxgb4/
22obj-$(CONFIG_EHEA) += ehea/ 23obj-$(CONFIG_EHEA) += ehea/
23obj-$(CONFIG_CAN) += can/ 24obj-$(CONFIG_CAN) += can/
24obj-$(CONFIG_BONDING) += bonding/ 25obj-$(CONFIG_BONDING) += bonding/
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba547069db3..0ebd8208f606 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
84 84
85#define ATLX_DRIVER_VERSION "2.1.3" 85#define ATLX_DRIVER_VERSION "2.1.3"
86MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 86MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
87 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 87Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
88MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89MODULE_VERSION(ATLX_DRIVER_VERSION); 89MODULE_VERSION(ATLX_DRIVER_VERSION);
90 90
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 61a9afdb83f4..da8793026bb1 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1466,8 +1466,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1466 1466
1467 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); 1467 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1468 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1468 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1469 req->params.offset = offset; 1469 req->params.offset = cpu_to_le32(offset);
1470 req->params.data_buf_size = 0x4; 1470 req->params.data_buf_size = cpu_to_le32(0x4);
1471 1471
1472 status = be_mcc_notify_wait(adapter); 1472 status = be_mcc_notify_wait(adapter);
1473 if (!status) 1473 if (!status)
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index d7390da470cd..d488d52d710a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
490{ 490{
491 int ret, i; 491 int ret, i;
492 struct be_dma_mem ddrdma_cmd; 492 struct be_dma_mem ddrdma_cmd;
493 u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5}; 493 u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
494 494
495 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 495 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index cb0a4a6d5dea..49d51965312e 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -879,7 +879,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
879 return; 879 return;
880 } 880 }
881 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 881 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
882 vid = be16_to_cpu(vid); 882 vid = swab16(vid);
883 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 883 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
884 } else { 884 } else {
885 netif_receive_skb(skb); 885 netif_receive_skb(skb);
@@ -956,7 +956,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
956 napi_gro_frags(&eq_obj->napi); 956 napi_gro_frags(&eq_obj->napi);
957 } else { 957 } else {
958 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 958 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
959 vid = be16_to_cpu(vid); 959 vid = swab16(vid);
960 960
961 if (!adapter->vlan_grp || adapter->vlans_added == 0) 961 if (!adapter->vlan_grp || adapter->vlans_added == 0)
962 return; 962 return;
@@ -1977,7 +1977,7 @@ static bool be_flash_redboot(struct be_adapter *adapter,
1977 p += crc_offset; 1977 p += crc_offset;
1978 1978
1979 status = be_cmd_get_flash_crc(adapter, flashed_crc, 1979 status = be_cmd_get_flash_crc(adapter, flashed_crc,
1980 (img_start + image_size - 4)); 1980 (image_size - 4));
1981 if (status) { 1981 if (status) {
1982 dev_err(&adapter->pdev->dev, 1982 dev_err(&adapter->pdev->dev,
1983 "could not get crc from flash, not flashing redboot\n"); 1983 "could not get crc from flash, not flashing redboot\n");
@@ -2113,7 +2113,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2113 struct flash_file_hdr_g3 *fhdr3; 2113 struct flash_file_hdr_g3 *fhdr3;
2114 struct image_hdr *img_hdr_ptr = NULL; 2114 struct image_hdr *img_hdr_ptr = NULL;
2115 struct be_dma_mem flash_cmd; 2115 struct be_dma_mem flash_cmd;
2116 int status, i = 0; 2116 int status, i = 0, num_imgs = 0;
2117 const u8 *p; 2117 const u8 *p;
2118 2118
2119 strcpy(fw_file, func); 2119 strcpy(fw_file, func);
@@ -2139,15 +2139,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2139 if ((adapter->generation == BE_GEN3) && 2139 if ((adapter->generation == BE_GEN3) &&
2140 (get_ufigen_type(fhdr) == BE_GEN3)) { 2140 (get_ufigen_type(fhdr) == BE_GEN3)) {
2141 fhdr3 = (struct flash_file_hdr_g3 *) fw->data; 2141 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2142 for (i = 0; i < fhdr3->num_imgs; i++) { 2142 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2143 for (i = 0; i < num_imgs; i++) {
2143 img_hdr_ptr = (struct image_hdr *) (fw->data + 2144 img_hdr_ptr = (struct image_hdr *) (fw->data +
2144 (sizeof(struct flash_file_hdr_g3) + 2145 (sizeof(struct flash_file_hdr_g3) +
2145 i * sizeof(struct image_hdr))); 2146 i * sizeof(struct image_hdr)));
2146 if (img_hdr_ptr->imageid == 1) { 2147 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2147 status = be_flash_data(adapter, fw, 2148 status = be_flash_data(adapter, fw, &flash_cmd,
2148 &flash_cmd, fhdr3->num_imgs); 2149 num_imgs);
2149 }
2150
2151 } 2150 }
2152 } else if ((adapter->generation == BE_GEN2) && 2151 } else if ((adapter->generation == BE_GEN2) &&
2153 (get_ufigen_type(fhdr) == BE_GEN2)) { 2152 (get_ufigen_type(fhdr) == BE_GEN2)) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0b69ffb7951d..802b538502eb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
246 246
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); 247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248 248
249static void bnx2_init_napi(struct bnx2 *bp);
250
249static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) 251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250{ 252{
251 u32 diff; 253 u32 diff;
@@ -6196,6 +6198,7 @@ bnx2_open(struct net_device *dev)
6196 bnx2_disable_int(bp); 6198 bnx2_disable_int(bp);
6197 6199
6198 bnx2_setup_int_mode(bp, disable_msi); 6200 bnx2_setup_int_mode(bp, disable_msi);
6201 bnx2_init_napi(bp);
6199 bnx2_napi_enable(bp); 6202 bnx2_napi_enable(bp);
6200 rc = bnx2_alloc_mem(bp); 6203 rc = bnx2_alloc_mem(bp);
6201 if (rc) 6204 if (rc)
@@ -7642,9 +7645,11 @@ poll_bnx2(struct net_device *dev)
7642 int i; 7645 int i;
7643 7646
7644 for (i = 0; i < bp->irq_nvecs; i++) { 7647 for (i = 0; i < bp->irq_nvecs; i++) {
7645 disable_irq(bp->irq_tbl[i].vector); 7648 struct bnx2_irq *irq = &bp->irq_tbl[i];
7646 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]); 7649
7647 enable_irq(bp->irq_tbl[i].vector); 7650 disable_irq(irq->vector);
7651 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7652 enable_irq(irq->vector);
7648 } 7653 }
7649} 7654}
7650#endif 7655#endif
@@ -8206,7 +8211,7 @@ bnx2_init_napi(struct bnx2 *bp)
8206{ 8211{
8207 int i; 8212 int i;
8208 8213
8209 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 8214 for (i = 0; i < bp->irq_nvecs; i++) {
8210 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; 8215 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8211 int (*poll)(struct napi_struct *, int); 8216 int (*poll)(struct napi_struct *, int);
8212 8217
@@ -8275,7 +8280,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8275 dev->ethtool_ops = &bnx2_ethtool_ops; 8280 dev->ethtool_ops = &bnx2_ethtool_ops;
8276 8281
8277 bp = netdev_priv(dev); 8282 bp = netdev_priv(dev);
8278 bnx2_init_napi(bp);
8279 8283
8280 pci_set_drvdata(pdev, dev); 8284 pci_set_drvdata(pdev, dev);
8281 8285
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 22682f1c8473..85e813c7762b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1162,6 +1162,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1162 write_lock_bh(&bond->curr_slave_lock); 1162 write_lock_bh(&bond->curr_slave_lock);
1163 } 1163 }
1164 } 1164 }
1165
1166 /* resend IGMP joins since all were sent on curr_active_slave */
1167 if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
1168 bond_resend_igmp_join_requests(bond);
1169 }
1165} 1170}
1166 1171
1167/** 1172/**
@@ -4096,22 +4101,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
4096 struct bonding *bond = netdev_priv(bond_dev); 4101 struct bonding *bond = netdev_priv(bond_dev);
4097 struct slave *slave, *start_at; 4102 struct slave *slave, *start_at;
4098 int i, slave_no, res = 1; 4103 int i, slave_no, res = 1;
4104 struct iphdr *iph = ip_hdr(skb);
4099 4105
4100 read_lock(&bond->lock); 4106 read_lock(&bond->lock);
4101 4107
4102 if (!BOND_IS_OK(bond)) 4108 if (!BOND_IS_OK(bond))
4103 goto out; 4109 goto out;
4104
4105 /* 4110 /*
4106 * Concurrent TX may collide on rr_tx_counter; we accept that 4111 * Start with the curr_active_slave that joined the bond as the
4107 * as being rare enough not to justify using an atomic op here 4112 * default for sending IGMP traffic. For failover purposes one
4113 * needs to maintain some consistency for the interface that will
4114 * send the join/membership reports. The curr_active_slave found
4115 * will send all of this type of traffic.
4108 */ 4116 */
4109 slave_no = bond->rr_tx_counter++ % bond->slave_cnt; 4117 if ((iph->protocol == IPPROTO_IGMP) &&
4118 (skb->protocol == htons(ETH_P_IP))) {
4110 4119
4111 bond_for_each_slave(bond, slave, i) { 4120 read_lock(&bond->curr_slave_lock);
4112 slave_no--; 4121 slave = bond->curr_active_slave;
4113 if (slave_no < 0) 4122 read_unlock(&bond->curr_slave_lock);
4114 break; 4123
4124 if (!slave)
4125 goto out;
4126 } else {
4127 /*
4128 * Concurrent TX may collide on rr_tx_counter; we accept
4129 * that as being rare enough not to justify using an
4130 * atomic op here.
4131 */
4132 slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
4133
4134 bond_for_each_slave(bond, slave, i) {
4135 slave_no--;
4136 if (slave_no < 0)
4137 break;
4138 }
4115 } 4139 }
4116 4140
4117 start_at = slave; 4141 start_at = slave;
@@ -4384,6 +4408,14 @@ static const struct net_device_ops bond_netdev_ops = {
4384 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4408 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4385}; 4409};
4386 4410
4411static void bond_destructor(struct net_device *bond_dev)
4412{
4413 struct bonding *bond = netdev_priv(bond_dev);
4414 if (bond->wq)
4415 destroy_workqueue(bond->wq);
4416 free_netdev(bond_dev);
4417}
4418
4387static void bond_setup(struct net_device *bond_dev) 4419static void bond_setup(struct net_device *bond_dev)
4388{ 4420{
4389 struct bonding *bond = netdev_priv(bond_dev); 4421 struct bonding *bond = netdev_priv(bond_dev);
@@ -4404,7 +4436,7 @@ static void bond_setup(struct net_device *bond_dev)
4404 bond_dev->ethtool_ops = &bond_ethtool_ops; 4436 bond_dev->ethtool_ops = &bond_ethtool_ops;
4405 bond_set_mode_ops(bond, bond->params.mode); 4437 bond_set_mode_ops(bond, bond->params.mode);
4406 4438
4407 bond_dev->destructor = free_netdev; 4439 bond_dev->destructor = bond_destructor;
4408 4440
4409 /* Initialize the device options */ 4441 /* Initialize the device options */
4410 bond_dev->tx_queue_len = 0; 4442 bond_dev->tx_queue_len = 0;
@@ -4476,9 +4508,6 @@ static void bond_uninit(struct net_device *bond_dev)
4476 4508
4477 bond_remove_proc_entry(bond); 4509 bond_remove_proc_entry(bond);
4478 4510
4479 if (bond->wq)
4480 destroy_workqueue(bond->wq);
4481
4482 __hw_addr_flush(&bond->mc_list); 4511 __hw_addr_flush(&bond->mc_list);
4483} 4512}
4484 4513
@@ -4890,8 +4919,8 @@ int bond_create(struct net *net, const char *name)
4890 bond_setup); 4919 bond_setup);
4891 if (!bond_dev) { 4920 if (!bond_dev) {
4892 pr_err("%s: eek! can't alloc netdev!\n", name); 4921 pr_err("%s: eek! can't alloc netdev!\n", name);
4893 res = -ENOMEM; 4922 rtnl_unlock();
4894 goto out; 4923 return -ENOMEM;
4895 } 4924 }
4896 4925
4897 dev_net_set(bond_dev, net); 4926 dev_net_set(bond_dev, net);
@@ -4900,19 +4929,16 @@ int bond_create(struct net *net, const char *name)
4900 if (!name) { 4929 if (!name) {
4901 res = dev_alloc_name(bond_dev, "bond%d"); 4930 res = dev_alloc_name(bond_dev, "bond%d");
4902 if (res < 0) 4931 if (res < 0)
4903 goto out_netdev; 4932 goto out;
4904 } 4933 }
4905 4934
4906 res = register_netdevice(bond_dev); 4935 res = register_netdevice(bond_dev);
4907 if (res < 0)
4908 goto out_netdev;
4909 4936
4910out: 4937out:
4911 rtnl_unlock(); 4938 rtnl_unlock();
4939 if (res < 0)
4940 bond_destructor(bond_dev);
4912 return res; 4941 return res;
4913out_netdev:
4914 free_netdev(bond_dev);
4915 goto out;
4916} 4942}
4917 4943
4918static int __net_init bond_net_init(struct net *net) 4944static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 866905fa4119..03489864376d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -22,6 +22,7 @@
22#include <linux/can/dev.h> 22#include <linux/can/dev.h>
23#include <linux/can/error.h> 23#include <linux/can/error.h>
24 24
25#include <asm/bfin_can.h>
25#include <asm/portmux.h> 26#include <asm/portmux.h>
26 27
27#define DRV_NAME "bfin_can" 28#define DRV_NAME "bfin_can"
@@ -29,90 +30,6 @@
29#define TX_ECHO_SKB_MAX 1 30#define TX_ECHO_SKB_MAX 1
30 31
31/* 32/*
32 * transmit and receive channels
33 */
34#define TRANSMIT_CHL 24
35#define RECEIVE_STD_CHL 0
36#define RECEIVE_EXT_CHL 4
37#define RECEIVE_RTR_CHL 8
38#define RECEIVE_EXT_RTR_CHL 12
39#define MAX_CHL_NUMBER 32
40
41/*
42 * bfin can registers layout
43 */
44struct bfin_can_mask_regs {
45 u16 aml;
46 u16 dummy1;
47 u16 amh;
48 u16 dummy2;
49};
50
51struct bfin_can_channel_regs {
52 u16 data[8];
53 u16 dlc;
54 u16 dummy1;
55 u16 tsv;
56 u16 dummy2;
57 u16 id0;
58 u16 dummy3;
59 u16 id1;
60 u16 dummy4;
61};
62
63struct bfin_can_regs {
64 /*
65 * global control and status registers
66 */
67 u16 mc1; /* offset 0 */
68 u16 dummy1;
69 u16 md1; /* offset 4 */
70 u16 rsv1[13];
71 u16 mbtif1; /* offset 0x20 */
72 u16 dummy2;
73 u16 mbrif1; /* offset 0x24 */
74 u16 dummy3;
75 u16 mbim1; /* offset 0x28 */
76 u16 rsv2[11];
77 u16 mc2; /* offset 0x40 */
78 u16 dummy4;
79 u16 md2; /* offset 0x44 */
80 u16 dummy5;
81 u16 trs2; /* offset 0x48 */
82 u16 rsv3[11];
83 u16 mbtif2; /* offset 0x60 */
84 u16 dummy6;
85 u16 mbrif2; /* offset 0x64 */
86 u16 dummy7;
87 u16 mbim2; /* offset 0x68 */
88 u16 rsv4[11];
89 u16 clk; /* offset 0x80 */
90 u16 dummy8;
91 u16 timing; /* offset 0x84 */
92 u16 rsv5[3];
93 u16 status; /* offset 0x8c */
94 u16 dummy9;
95 u16 cec; /* offset 0x90 */
96 u16 dummy10;
97 u16 gis; /* offset 0x94 */
98 u16 dummy11;
99 u16 gim; /* offset 0x98 */
100 u16 rsv6[3];
101 u16 ctrl; /* offset 0xa0 */
102 u16 dummy12;
103 u16 intr; /* offset 0xa4 */
104 u16 rsv7[7];
105 u16 esr; /* offset 0xb4 */
106 u16 rsv8[37];
107
108 /*
109 * channel(mailbox) mask and message registers
110 */
111 struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
112 struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
113};
114
115/*
116 * bfin can private data 33 * bfin can private data
117 */ 34 */
118struct bfin_can_priv { 35struct bfin_can_priv {
@@ -163,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev)
163 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) 80 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
164 timing |= SAM; 81 timing |= SAM;
165 82
166 bfin_write16(&reg->clk, clk); 83 bfin_write16(&reg->clock, clk);
167 bfin_write16(&reg->timing, timing); 84 bfin_write16(&reg->timing, timing);
168 85
169 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", 86 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
@@ -185,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
185 bfin_write16(&reg->gim, 0); 102 bfin_write16(&reg->gim, 0);
186 103
187 /* reset can and enter configuration mode */ 104 /* reset can and enter configuration mode */
188 bfin_write16(&reg->ctrl, SRS | CCR); 105 bfin_write16(&reg->control, SRS | CCR);
189 SSYNC(); 106 SSYNC();
190 bfin_write16(&reg->ctrl, CCR); 107 bfin_write16(&reg->control, CCR);
191 SSYNC(); 108 SSYNC();
192 while (!(bfin_read16(&reg->ctrl) & CCA)) { 109 while (!(bfin_read16(&reg->control) & CCA)) {
193 udelay(10); 110 udelay(10);
194 if (--timeout == 0) { 111 if (--timeout == 0) {
195 dev_err(dev->dev.parent, 112 dev_err(dev->dev.parent,
@@ -244,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
244 /* 161 /*
245 * leave configuration mode 162 * leave configuration mode
246 */ 163 */
247 bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR); 164 bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
248 165
249 while (bfin_read16(&reg->status) & CCA) { 166 while (bfin_read16(&reg->status) & CCA) {
250 udelay(10); 167 udelay(10);
@@ -726,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
726 643
727 if (netif_running(dev)) { 644 if (netif_running(dev)) {
728 /* enter sleep mode */ 645 /* enter sleep mode */
729 bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR); 646 bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
730 SSYNC(); 647 SSYNC();
731 while (!(bfin_read16(&reg->intr) & SMACK)) { 648 while (!(bfin_read16(&reg->intr) & SMACK)) {
732 udelay(10); 649 udelay(10);
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
1#
2# Chelsio T4 driver
3#
4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 000000000000..3d8ff4889b56
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_H__
36#define __CXGB4_H__
37
38#include <linux/bitops.h>
39#include <linux/cache.h>
40#include <linux/interrupt.h>
41#include <linux/list.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/spinlock.h>
45#include <linux/timer.h>
46#include <asm/io.h>
47#include "cxgb4_uld.h"
48#include "t4_hw.h"
49
50#define FW_VERSION_MAJOR 1
51#define FW_VERSION_MINOR 1
52#define FW_VERSION_MICRO 0
53
54enum {
55 MAX_NPORTS = 4, /* max # of ports */
56 SERNUM_LEN = 16, /* Serial # length */
57 EC_LEN = 16, /* E/C length */
58 ID_LEN = 16, /* ID length */
59};
60
61enum {
62 MEM_EDC0,
63 MEM_EDC1,
64 MEM_MC
65};
66
67enum dev_master {
68 MASTER_CANT,
69 MASTER_MAY,
70 MASTER_MUST
71};
72
73enum dev_state {
74 DEV_STATE_UNINIT,
75 DEV_STATE_INIT,
76 DEV_STATE_ERR
77};
78
79enum {
80 PAUSE_RX = 1 << 0,
81 PAUSE_TX = 1 << 1,
82 PAUSE_AUTONEG = 1 << 2
83};
84
85struct port_stats {
86 u64 tx_octets; /* total # of octets in good frames */
87 u64 tx_frames; /* all good frames */
88 u64 tx_bcast_frames; /* all broadcast frames */
89 u64 tx_mcast_frames; /* all multicast frames */
90 u64 tx_ucast_frames; /* all unicast frames */
91 u64 tx_error_frames; /* all error frames */
92
93 u64 tx_frames_64; /* # of Tx frames in a particular range */
94 u64 tx_frames_65_127;
95 u64 tx_frames_128_255;
96 u64 tx_frames_256_511;
97 u64 tx_frames_512_1023;
98 u64 tx_frames_1024_1518;
99 u64 tx_frames_1519_max;
100
101 u64 tx_drop; /* # of dropped Tx frames */
102 u64 tx_pause; /* # of transmitted pause frames */
103 u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
104 u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
105 u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
106 u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
107 u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
108 u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
109 u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
110 u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
111
112 u64 rx_octets; /* total # of octets in good frames */
113 u64 rx_frames; /* all good frames */
114 u64 rx_bcast_frames; /* all broadcast frames */
115 u64 rx_mcast_frames; /* all multicast frames */
116 u64 rx_ucast_frames; /* all unicast frames */
117 u64 rx_too_long; /* # of frames exceeding MTU */
118 u64 rx_jabber; /* # of jabber frames */
119 u64 rx_fcs_err; /* # of received frames with bad FCS */
120 u64 rx_len_err; /* # of received frames with length error */
121 u64 rx_symbol_err; /* symbol errors */
122 u64 rx_runt; /* # of short frames */
123
124 u64 rx_frames_64; /* # of Rx frames in a particular range */
125 u64 rx_frames_65_127;
126 u64 rx_frames_128_255;
127 u64 rx_frames_256_511;
128 u64 rx_frames_512_1023;
129 u64 rx_frames_1024_1518;
130 u64 rx_frames_1519_max;
131
132 u64 rx_pause; /* # of received pause frames */
133 u64 rx_ppp0; /* # of received PPP prio 0 frames */
134 u64 rx_ppp1; /* # of received PPP prio 1 frames */
135 u64 rx_ppp2; /* # of received PPP prio 2 frames */
136 u64 rx_ppp3; /* # of received PPP prio 3 frames */
137 u64 rx_ppp4; /* # of received PPP prio 4 frames */
138 u64 rx_ppp5; /* # of received PPP prio 5 frames */
139 u64 rx_ppp6; /* # of received PPP prio 6 frames */
140 u64 rx_ppp7; /* # of received PPP prio 7 frames */
141
142 u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
143 u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
144 u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
145 u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
146 u64 rx_trunc0; /* buffer-group 0 truncated packets */
147 u64 rx_trunc1; /* buffer-group 1 truncated packets */
148 u64 rx_trunc2; /* buffer-group 2 truncated packets */
149 u64 rx_trunc3; /* buffer-group 3 truncated packets */
150};
151
152struct lb_port_stats {
153 u64 octets;
154 u64 frames;
155 u64 bcast_frames;
156 u64 mcast_frames;
157 u64 ucast_frames;
158 u64 error_frames;
159
160 u64 frames_64;
161 u64 frames_65_127;
162 u64 frames_128_255;
163 u64 frames_256_511;
164 u64 frames_512_1023;
165 u64 frames_1024_1518;
166 u64 frames_1519_max;
167
168 u64 drop;
169
170 u64 ovflow0;
171 u64 ovflow1;
172 u64 ovflow2;
173 u64 ovflow3;
174 u64 trunc0;
175 u64 trunc1;
176 u64 trunc2;
177 u64 trunc3;
178};
179
180struct tp_tcp_stats {
181 u32 tcpOutRsts;
182 u64 tcpInSegs;
183 u64 tcpOutSegs;
184 u64 tcpRetransSegs;
185};
186
187struct tp_err_stats {
188 u32 macInErrs[4];
189 u32 hdrInErrs[4];
190 u32 tcpInErrs[4];
191 u32 tnlCongDrops[4];
192 u32 ofldChanDrops[4];
193 u32 tnlTxDrops[4];
194 u32 ofldVlanDrops[4];
195 u32 tcp6InErrs[4];
196 u32 ofldNoNeigh;
197 u32 ofldCongDefer;
198};
199
200struct tp_params {
201 unsigned int ntxchan; /* # of Tx channels */
202 unsigned int tre; /* log2 of core clocks per TP tick */
203};
204
205struct vpd_params {
206 unsigned int cclk;
207 u8 ec[EC_LEN + 1];
208 u8 sn[SERNUM_LEN + 1];
209 u8 id[ID_LEN + 1];
210};
211
212struct pci_params {
213 unsigned char speed;
214 unsigned char width;
215};
216
217struct adapter_params {
218 struct tp_params tp;
219 struct vpd_params vpd;
220 struct pci_params pci;
221
222 unsigned int fw_vers;
223 unsigned int tp_vers;
224 u8 api_vers[7];
225
226 unsigned short mtus[NMTUS];
227 unsigned short a_wnd[NCCTRL_WIN];
228 unsigned short b_wnd[NCCTRL_WIN];
229
230 unsigned char nports; /* # of ethernet ports */
231 unsigned char portvec;
232 unsigned char rev; /* chip revision */
233 unsigned char offload;
234
235 unsigned int ofldq_wr_cred;
236};
237
238struct trace_params {
239 u32 data[TRACE_LEN / 4];
240 u32 mask[TRACE_LEN / 4];
241 unsigned short snap_len;
242 unsigned short min_len;
243 unsigned char skip_ofst;
244 unsigned char skip_len;
245 unsigned char invert;
246 unsigned char port;
247};
248
249struct link_config {
250 unsigned short supported; /* link capabilities */
251 unsigned short advertising; /* advertised capabilities */
252 unsigned short requested_speed; /* speed user has requested */
253 unsigned short speed; /* actual link speed */
254 unsigned char requested_fc; /* flow control user has requested */
255 unsigned char fc; /* actual link flow control */
256 unsigned char autoneg; /* autonegotiating? */
257 unsigned char link_ok; /* link up? */
258};
259
260#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
261
262enum {
263 MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
264 MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
265 MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
266 MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
267};
268
269enum {
270 MAX_EGRQ = 128, /* max # of egress queues, including FLs */
271 MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
272};
273
274struct adapter;
275struct vlan_group;
276struct sge_rspq;
277
278struct port_info {
279 struct adapter *adapter;
280 struct vlan_group *vlan_grp;
281 u16 viid;
282 s16 xact_addr_filt; /* index of exact MAC address filter */
283 u16 rss_size; /* size of VI's RSS table slice */
284 s8 mdio_addr;
285 u8 port_type;
286 u8 mod_type;
287 u8 port_id;
288 u8 tx_chan;
289 u8 lport; /* associated offload logical port */
290 u8 rx_offload; /* CSO, etc */
291 u8 nqsets; /* # of qsets */
292 u8 first_qset; /* index of first qset */
293 struct link_config link_cfg;
294};
295
296/* port_info.rx_offload flags */
297enum {
298 RX_CSO = 1 << 0,
299};
300
301struct dentry;
302struct work_struct;
303
304enum { /* adapter flags */
305 FULL_INIT_DONE = (1 << 0),
306 USING_MSI = (1 << 1),
307 USING_MSIX = (1 << 2),
308 QUEUES_BOUND = (1 << 3),
309 FW_OK = (1 << 4),
310};
311
312struct rx_sw_desc;
313
314struct sge_fl { /* SGE free-buffer queue state */
315 unsigned int avail; /* # of available Rx buffers */
316 unsigned int pend_cred; /* new buffers since last FL DB ring */
317 unsigned int cidx; /* consumer index */
318 unsigned int pidx; /* producer index */
319 unsigned long alloc_failed; /* # of times buffer allocation failed */
320 unsigned long large_alloc_failed;
321 unsigned long starving;
322 /* RO fields */
323 unsigned int cntxt_id; /* SGE context id for the free list */
324 unsigned int size; /* capacity of free list */
325 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
326 __be64 *desc; /* address of HW Rx descriptor ring */
327 dma_addr_t addr; /* bus address of HW ring start */
328};
329
330/* A packet gather list */
331struct pkt_gl {
332 skb_frag_t frags[MAX_SKB_FRAGS];
333 void *va; /* virtual address of first byte */
334 unsigned int nfrags; /* # of fragments */
335 unsigned int tot_len; /* total length of fragments */
336};
337
338typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
339 const struct pkt_gl *gl);
340
341struct sge_rspq { /* state for an SGE response queue */
342 struct napi_struct napi;
343 const __be64 *cur_desc; /* current descriptor in queue */
344 unsigned int cidx; /* consumer index */
345 u8 gen; /* current generation bit */
346 u8 intr_params; /* interrupt holdoff parameters */
347 u8 next_intr_params; /* holdoff params for next interrupt */
348 u8 pktcnt_idx; /* interrupt packet threshold */
349 u8 uld; /* ULD handling this queue */
350 u8 idx; /* queue index within its group */
351 int offset; /* offset into current Rx buffer */
352 u16 cntxt_id; /* SGE context id for the response q */
353 u16 abs_id; /* absolute SGE id for the response q */
354 __be64 *desc; /* address of HW response ring */
355 dma_addr_t phys_addr; /* physical address of the ring */
356 unsigned int iqe_len; /* entry size */
357 unsigned int size; /* capacity of response queue */
358 struct adapter *adap;
359 struct net_device *netdev; /* associated net device */
360 rspq_handler_t handler;
361};
362
363struct sge_eth_stats { /* Ethernet queue statistics */
364 unsigned long pkts; /* # of ethernet packets */
365 unsigned long lro_pkts; /* # of LRO super packets */
366 unsigned long lro_merged; /* # of wire packets merged by LRO */
367 unsigned long rx_cso; /* # of Rx checksum offloads */
368 unsigned long vlan_ex; /* # of Rx VLAN extractions */
369 unsigned long rx_drops; /* # of packets dropped due to no mem */
370};
371
372struct sge_eth_rxq { /* SW Ethernet Rx queue */
373 struct sge_rspq rspq;
374 struct sge_fl fl;
375 struct sge_eth_stats stats;
376} ____cacheline_aligned_in_smp;
377
378struct sge_ofld_stats { /* offload queue statistics */
379 unsigned long pkts; /* # of packets */
380 unsigned long imm; /* # of immediate-data packets */
381 unsigned long an; /* # of asynchronous notifications */
382 unsigned long nomem; /* # of responses deferred due to no mem */
383};
384
385struct sge_ofld_rxq { /* SW offload Rx queue */
386 struct sge_rspq rspq;
387 struct sge_fl fl;
388 struct sge_ofld_stats stats;
389} ____cacheline_aligned_in_smp;
390
391struct tx_desc {
392 __be64 flit[8];
393};
394
395struct tx_sw_desc;
396
397struct sge_txq {
398 unsigned int in_use; /* # of in-use Tx descriptors */
399 unsigned int size; /* # of descriptors */
400 unsigned int cidx; /* SW consumer index */
401 unsigned int pidx; /* producer index */
402 unsigned long stops; /* # of times q has been stopped */
403 unsigned long restarts; /* # of queue restarts */
404 unsigned int cntxt_id; /* SGE context id for the Tx q */
405 struct tx_desc *desc; /* address of HW Tx descriptor ring */
406 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
407 struct sge_qstat *stat; /* queue status entry */
408 dma_addr_t phys_addr; /* physical address of the ring */
409};
410
411struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
412 struct sge_txq q;
413 struct netdev_queue *txq; /* associated netdev TX queue */
414 unsigned long tso; /* # of TSO requests */
415 unsigned long tx_cso; /* # of Tx checksum offloads */
416 unsigned long vlan_ins; /* # of Tx VLAN insertions */
417 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
418} ____cacheline_aligned_in_smp;
419
420struct sge_ofld_txq { /* state for an SGE offload Tx queue */
421 struct sge_txq q;
422 struct adapter *adap;
423 struct sk_buff_head sendq; /* list of backpressured packets */
424 struct tasklet_struct qresume_tsk; /* restarts the queue */
425 u8 full; /* the Tx ring is full */
426 unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
427} ____cacheline_aligned_in_smp;
428
429struct sge_ctrl_txq { /* state for an SGE control Tx queue */
430 struct sge_txq q;
431 struct adapter *adap;
432 struct sk_buff_head sendq; /* list of backpressured packets */
433 struct tasklet_struct qresume_tsk; /* restarts the queue */
434 u8 full; /* the Tx ring is full */
435} ____cacheline_aligned_in_smp;
436
437struct sge {
438 struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
439 struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
440 struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
441
442 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
443 struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
444 struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
445 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
446
447 struct sge_rspq intrq ____cacheline_aligned_in_smp;
448 spinlock_t intrq_lock;
449
450 u16 max_ethqsets; /* # of available Ethernet queue sets */
451 u16 ethqsets; /* # of active Ethernet queue sets */
452 u16 ethtxq_rover; /* Tx queue to clean up next */
453 u16 ofldqsets; /* # of active offload queue sets */
454 u16 rdmaqs; /* # of available RDMA Rx queues */
455 u16 ofld_rxq[MAX_OFLD_QSETS];
456 u16 rdma_rxq[NCHAN];
457 u16 timer_val[SGE_NTIMERS];
458 u8 counter_val[SGE_NCOUNTERS];
459 unsigned int starve_thres;
460 u8 idma_state[2];
461 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
462 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
463 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
464 DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
465 struct timer_list rx_timer; /* refills starving FLs */
466 struct timer_list tx_timer; /* checks Tx queues */
467};
468
469#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
470#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
471#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
472
473struct l2t_data;
474
475struct adapter {
476 void __iomem *regs;
477 struct pci_dev *pdev;
478 struct device *pdev_dev;
479 unsigned long registered_device_map;
480 unsigned long open_device_map;
481 unsigned long flags;
482
483 const char *name;
484 int msg_enable;
485
486 struct adapter_params params;
487 struct cxgb4_virt_res vres;
488 unsigned int swintr;
489
490 unsigned int wol;
491
492 struct {
493 unsigned short vec;
494 char desc[14];
495 } msix_info[MAX_INGQ + 1];
496
497 struct sge sge;
498
499 struct net_device *port[MAX_NPORTS];
500 u8 chan_map[NCHAN]; /* channel -> port map */
501
502 struct l2t_data *l2t;
503 void *uld_handle[CXGB4_ULD_MAX];
504 struct list_head list_node;
505
506 struct tid_info tids;
507 void **tid_release_head;
508 spinlock_t tid_release_lock;
509 struct work_struct tid_release_task;
510 bool tid_release_task_busy;
511
512 struct dentry *debugfs_root;
513
514 spinlock_t stats_lock;
515};
516
517static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
518{
519 return readl(adap->regs + reg_addr);
520}
521
522static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
523{
524 writel(val, adap->regs + reg_addr);
525}
526
527#ifndef readq
528static inline u64 readq(const volatile void __iomem *addr)
529{
530 return readl(addr) + ((u64)readl(addr + 4) << 32);
531}
532
533static inline void writeq(u64 val, volatile void __iomem *addr)
534{
535 writel(val, addr);
536 writel(val >> 32, addr + 4);
537}
538#endif
539
540static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
541{
542 return readq(adap->regs + reg_addr);
543}
544
545static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
546{
547 writeq(val, adap->regs + reg_addr);
548}
549
/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device.  The driver
 * stores its per-port state in the netdev private area, so this is just
 * a typed view of netdev_priv().
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}
560
/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 * No bounds checking: @idx must be a valid index into adap->port[].
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}
572
/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device, reached
 * through the port_info stored in the netdev private area.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}
583
584void t4_os_portmod_changed(const struct adapter *adap, int port_id);
585void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
586
587void *t4_alloc_mem(size_t size);
588void t4_free_mem(void *addr);
589
590void t4_free_sge_resources(struct adapter *adap);
591irq_handler_t t4_intr_handler(struct adapter *adap);
592netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
593int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
594 const struct pkt_gl *gl);
595int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
596int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
597int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
598 struct net_device *dev, int intr_idx,
599 struct sge_fl *fl, rspq_handler_t hnd);
600int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
601 struct net_device *dev, struct netdev_queue *netdevq,
602 unsigned int iqid);
603int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
604 struct net_device *dev, unsigned int iqid,
605 unsigned int cmplqid);
606int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
607 struct net_device *dev, unsigned int iqid);
608irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
609void t4_sge_init(struct adapter *adap);
610void t4_sge_start(struct adapter *adap);
611void t4_sge_stop(struct adapter *adap);
612
613#define for_each_port(adapter, iter) \
614 for (iter = 0; iter < (adapter)->params.nports; ++iter)
615
/* Core clock ticks per microsecond (cclk is evidently stored in kHz). */
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}
620
/*
 * Convert microseconds to core clock ticks.  Multiplies before dividing
 * to keep sub-tick precision; assumes us * cclk fits in unsigned int.
 */
static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}
626
627void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
628 u32 val);
629
630int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
631 void *rpl, bool sleep_ok);
632
/* Issue a mailbox command, allowed to sleep while waiting for the reply. */
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}
638
/* Non-sleeping variant of t4_wr_mbox() for atomic contexts. */
static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
644
645void t4_intr_enable(struct adapter *adapter);
646void t4_intr_disable(struct adapter *adapter);
647void t4_intr_clear(struct adapter *adapter);
648int t4_slow_intr_handler(struct adapter *adapter);
649
650int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
651 struct link_config *lc);
652int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
653int t4_seeprom_wp(struct adapter *adapter, bool enable);
654int t4_read_flash(struct adapter *adapter, unsigned int addr,
655 unsigned int nwords, u32 *data, int byte_oriented);
656int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
657int t4_check_fw_version(struct adapter *adapter);
658int t4_prep_adapter(struct adapter *adapter);
659int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
660void t4_fatal_err(struct adapter *adapter);
661void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
662int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
663 int filter_index, int enable);
664void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
665 int filter_index, int *enabled);
666int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
667 int start, int n, const u16 *rspq, unsigned int nrspq);
668int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
669 unsigned int flags);
670int t4_read_rss(struct adapter *adapter, u16 *entries);
671int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
672int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
673 u64 *parity);
674
675void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
676void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
677
678void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
679void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
680void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
681 struct tp_tcp_stats *v6);
682void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
683 const unsigned short *alpha, const unsigned short *beta);
684
685void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
686 const u8 *addr);
687int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
688 u64 mask0, u64 mask1, unsigned int crc, bool enable);
689
690int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
691 enum dev_master master, enum dev_state *state);
692int t4_fw_bye(struct adapter *adap, unsigned int mbox);
693int t4_early_init(struct adapter *adap, unsigned int mbox);
694int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
695int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
696 unsigned int vf, unsigned int nparams, const u32 *params,
697 u32 *val);
698int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
699 unsigned int vf, unsigned int nparams, const u32 *params,
700 const u32 *val);
701int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
702 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
703 unsigned int rxqi, unsigned int rxq, unsigned int tc,
704 unsigned int vi, unsigned int cmask, unsigned int pmask,
705 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
706int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
707 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
708 unsigned int *rss_size);
709int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
710 unsigned int vf, unsigned int viid);
711int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
712 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
713int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
714 unsigned int viid, bool free, unsigned int naddr,
715 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
716int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
717 int idx, const u8 *addr, bool persist, bool add_smt);
718int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
719 bool ucast, u64 vec, bool sleep_ok);
720int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
721 bool rx_en, bool tx_en);
722int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
723 unsigned int nblinks);
724int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
725 unsigned int mmd, unsigned int reg, u16 *valp);
726int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
727 unsigned int mmd, unsigned int reg, u16 val);
728int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
729 unsigned int pf, unsigned int vf, unsigned int iqid,
730 unsigned int fl0id, unsigned int fl1id);
731int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
732 unsigned int vf, unsigned int iqtype, unsigned int iqid,
733 unsigned int fl0id, unsigned int fl1id);
734int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
735 unsigned int vf, unsigned int eqid);
736int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
737 unsigned int vf, unsigned int eqid);
738int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
739 unsigned int vf, unsigned int eqid);
740int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
741#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..5f582dba928f
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.0.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80enum {
81 MEMWIN0_APERTURE = 65536,
82 MEMWIN0_BASE = 0x30000,
83 MEMWIN1_APERTURE = 32768,
84 MEMWIN1_BASE = 0x28000,
85 MEMWIN2_APERTURE = 2048,
86 MEMWIN2_BASE = 0x1b800,
87};
88
89enum {
90 MAX_TXQ_ENTRIES = 16384,
91 MAX_CTRL_TXQ_ENTRIES = 1024,
92 MAX_RSPQ_ENTRIES = 16384,
93 MAX_RX_BUFFERS = 16384,
94 MIN_TXQ_ENTRIES = 32,
95 MIN_CTRL_TXQ_ENTRIES = 32,
96 MIN_RSPQ_ENTRIES = 128,
97 MIN_FL_ENTRIES = 16
98};
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
105
106static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
107 CH_DEVICE(0xa000), /* PE10K */
108 { 0, }
109};
110
111#define FW_FNAME "cxgb4/t4fw.bin"
112
113MODULE_DESCRIPTION(DRV_DESC);
114MODULE_AUTHOR("Chelsio Communications");
115MODULE_LICENSE("Dual BSD/GPL");
116MODULE_VERSION(DRV_VERSION);
117MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
118MODULE_FIRMWARE(FW_FNAME);
119
120static int dflt_msg_enable = DFLT_MSG_ENABLE;
121
122module_param(dflt_msg_enable, int, 0644);
123MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
124
125/*
126 * The driver uses the best interrupt scheme available on a platform in the
127 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
128 * of these schemes the driver may consider as follows:
129 *
130 * msi = 2: choose from among all three options
131 * msi = 1: only consider MSI and INTx interrupts
132 * msi = 0: force INTx interrupts
133 */
134static int msi = 2;
135
136module_param(msi, int, 0644);
137MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
138
139/*
140 * Queue interrupt hold-off timer values. Queues default to the first of these
141 * upon creation.
142 */
143static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
144
145module_param_array(intr_holdoff, uint, NULL, 0644);
146MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
147 "0..4 in microseconds");
148
149static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
150
151module_param_array(intr_cnt, uint, NULL, 0644);
152MODULE_PARM_DESC(intr_cnt,
153 "thresholds 1..3 for queue interrupt packet counters");
154
155static int vf_acls;
156
157#ifdef CONFIG_PCI_IOV
158module_param(vf_acls, bool, 0644);
159MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
160
161static unsigned int num_vf[4];
162
163module_param_array(num_vf, uint, NULL, 0644);
164MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
165#endif
166
167static struct dentry *cxgb4_debugfs_root;
168
169static LIST_HEAD(adapter_list);
170static DEFINE_MUTEX(uld_mutex);
171static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
172static const char *uld_str[] = { "RDMA", "iSCSI" };
173
174static void link_report(struct net_device *dev)
175{
176 if (!netif_carrier_ok(dev))
177 netdev_info(dev, "link down\n");
178 else {
179 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
180
181 const char *s = "10Mbps";
182 const struct port_info *p = netdev_priv(dev);
183
184 switch (p->link_cfg.speed) {
185 case SPEED_10000:
186 s = "10Gbps";
187 break;
188 case SPEED_1000:
189 s = "1000Mbps";
190 break;
191 case SPEED_100:
192 s = "100Mbps";
193 break;
194 }
195
196 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
197 fc[p->link_cfg.fc]);
198 }
199}
200
201void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
202{
203 struct net_device *dev = adapter->port[port_id];
204
205 /* Skip changes from disabled ports. */
206 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
207 if (link_stat)
208 netif_carrier_on(dev);
209 else
210 netif_carrier_off(dev);
211
212 link_report(dev);
213 }
214}
215
/*
 * OS callback for a port module (transceiver) insertion or removal.
 * Logs the new module type; mod_str is indexed by FW_PORT_MOD_TYPE_*.
 */
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
230
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.  Addresses are pushed to the FW in
 * batches of up to 7; whatever cannot be placed in exact filters is
 * accumulated into the unicast/multicast hash values by t4_alloc_mac_filt()
 * and programmed at the end via t4_set_addr_hash().
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;	/* free existing filters on the first call only */
	u16 filt_idx[7];	/* filter indices assigned by the FW */
	const u8 *addr[7];	/* current batch of addresses */
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		/* flush when the batch is full or this was the last address */
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
279
280/*
281 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
282 * If @mtu is -1 it is left unchanged.
283 */
284static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
285{
286 int ret;
287 struct port_info *pi = netdev_priv(dev);
288
289 ret = set_addr_filters(dev, sleep_ok);
290 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0,
293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
294 sleep_ok);
295 return ret;
296}
297
298/**
299 * link_start - enable a port
300 * @dev: the port to enable
301 *
302 * Performs the MAC and PHY actions needed to enable a port.
303 */
304static int link_start(struct net_device *dev)
305{
306 int ret;
307 struct port_info *pi = netdev_priv(dev);
308
309 /*
310 * We do not set address filters and promiscuity here, the stack does
311 * that step explicitly.
312 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
314 true);
315 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true,
318 false);
319 if (ret >= 0) {
320 pi->xact_addr_filt = ret;
321 ret = 0;
322 }
323 }
324 if (ret == 0)
325 ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
326 if (ret == 0)
327 ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
328 return ret;
329}
330
/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                    /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		/* Egress update: a Tx queue made progress, restart it. */
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq = q->adap->sge.egr_map[qid];

		txq->restarts++;
		/* Discriminate Ethernet vs offload Tx queues by address:
		 * relies on ethtxq preceding ethrxq in struct sge.
		 * NOTE(review): layout-dependent -- verify against struct sge. */
		if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		/* type 0 carries a FW reply handled by common code */
		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}
371
/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* non-zero from the ULD means it could not take the message */
	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;	/* immediate data, no gather list */
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;	/* asynchronous notification */
	else
		rxq->stats.pkts++;	/* regular ingress packet */
	return 0;
}
398
/*
 * Disable whichever message-signalled interrupt mode is active and clear
 * the corresponding flag; a no-op under legacy INTx.
 */
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
409
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		/* software interrupt: note it and acknowledge the cause */
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
425
/*
 * Name the MSI-X interrupts.  Vector 0 is the non-data interrupt,
 * vector 1 the FW event queue, then the per-port Ethernet Rx queues,
 * the offload queues, and the RDMA queues -- the same ordering used by
 * request_msix_queue_irqs() and free_msix_queue_irqs().
 */
static void name_msix_vecs(struct adapter *adap)
{
	/* n reserves the last byte of desc[] for the terminating NUL */
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
	adap->msix_info[1].desc[n] = 0;

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
	for_each_rdmarxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
}
465
/*
 * Request the MSI-X IRQs for all ingress queues: vector 1 for the FW
 * event queue, then vectors 2+ for Ethernet, offload, and RDMA Rx queues
 * in that order (matching name_msix_vecs()).  On failure, frees in
 * reverse order everything requested so far and returns the error.
 */
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	return 0;

unwind:
	/*
	 * Pre-decrement skips the queue whose request just failed; loops
	 * that completed left their index at the queue count, so they
	 * unwind fully.  msi tracks back in lock-step.
	 */
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
514
515static void free_msix_queue_irqs(struct adapter *adap)
516{
517 int i, msi = 2;
518 struct sge *s = &adap->sge;
519
520 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
521 for_each_ethrxq(s, i)
522 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
523 for_each_ofldrxq(s, i)
524 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
525 for_each_rdmarxq(s, i)
526 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
527}
528
529/**
530 * setup_rss - configure RSS
531 * @adap: the adapter
532 *
533 * Sets up RSS to distribute packets to multiple receive queues. We
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for all ports since the mapping
538 * table has plenty of entries.
539 */
540static int setup_rss(struct adapter *adap)
541{
542 int i, j, err;
543 u16 rss[MAX_ETH_QSETS];
544
545 for_each_port(adap, i) {
546 const struct port_info *pi = adap2pinfo(adap, i);
547 const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
548
549 for (j = 0; j < pi->nqsets; j++)
550 rss[j] = q[j].rspq.abs_id;
551
552 err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
553 rss, pi->nqsets);
554 if (err)
555 return err;
556 }
557 return 0;
558}
559
/*
 * Wait until all NAPI handlers are descheduled.  Walks every ingress
 * queue in the map; queues without a handler run no NAPI instance.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
574
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
595
/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		/* Without MSI-X, allocate a shared interrupt queue; the
		 * negative msi_idx passed below tells the allocator to
		 * forward queue interrupts to intrq instead of a vector. */
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
		/* common cleanup path for every allocation failure below */
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	/* Ethernet Rx and Tx queue sets, per port */
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;	/* advance to the next vector */
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	/* NOTE(review): appears to steer trace/RSS-control traffic to the
	 * first Ethernet queue of port 0 -- confirm against T4 reg docs. */
	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
704
/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 * (Also returns 0 when the flash FW is already usable and at least as new,
 * in which case nothing is loaded.)
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL;          /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
		/* negate so a flash failure comes back as a positive errno */
		ret = -t4_load_fw(adap, fw->data, fw->size);
		if (!ret)
			/* %pI4 on the big-endian version prints it a.b.c.d */
			dev_info(dev, "firmware upgraded to version %pI4 from "
				 FW_FNAME "\n", &hdr->fw_ver);
	}
out:	release_firmware(fw);
	return ret;
}
744
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.  Returns NULL if both allocators fail;
 * free with t4_free_mem(), which handles either origin.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);	/* fall back for large allocations */
	if (p)
		memset(p, 0, size);
	return p;
}
759
/*
 * Free memory allocated through t4_alloc_mem(), dispatching to vfree or
 * kfree depending on which allocator produced the pointer.
 */
void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
770
/* Non-zero if the adapter supports protocol offload (ULD) operation. */
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}
775
776/*
777 * Implementation of ethtool operations.
778 */
779
/* ethtool get_msglevel: report the adapter's debug message bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}
784
/* ethtool set_msglevel: set the adapter's debug message bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
789
/*
 * ethtool statistics names, reported via get_strings/get_sset_count.
 * Order must match the values packed by the corresponding get_stats
 * implementation; names are space-padded to a fixed width.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
};
863
864static int get_sset_count(struct net_device *dev, int sset)
865{
866 switch (sset) {
867 case ETH_SS_STATS:
868 return ARRAY_SIZE(stats_strings);
869 default:
870 return -EOPNOTSUPP;
871 }
872}
873
/* Size in bytes of the buffer get_regs() fills. */
#define T4_REGMAP_SIZE (160 * 1024)

/* ethtool: length of a full register dump. */
static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}
880
/* ethtool: size of the adapter's serial EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
885
886static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
887{
888 struct adapter *adapter = netdev2adap(dev);
889
890 strcpy(info->driver, KBUILD_MODNAME);
891 strcpy(info->version, DRV_VERSION);
892 strcpy(info->bus_info, pci_name(adapter->pdev));
893
894 if (!adapter->params.fw_vers)
895 strcpy(info->fw_version, "N/A");
896 else
897 snprintf(info->fw_version, sizeof(info->fw_version),
898 "%u.%u.%u.%u, TP %u.%u.%u.%u",
899 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
900 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
901 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
902 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
903 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
904 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
905 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
906 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
907}
908
909static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
910{
911 if (stringset == ETH_SS_STATS)
912 memcpy(data, stats_strings, sizeof(stats_strings));
913}
914
915/*
916 * port stats maintained per queue of the port. They should be in the same
917 * order as in stats_strings above.
918 */
919struct queue_port_stats {
920 u64 tso;
921 u64 tx_csum;
922 u64 rx_csum;
923 u64 vlan_ex;
924 u64 vlan_ins;
925};
926
927static void collect_sge_port_stats(const struct adapter *adap,
928 const struct port_info *p, struct queue_port_stats *s)
929{
930 int i;
931 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
932 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
933
934 memset(s, 0, sizeof(*s));
935 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
936 s->tso += tx->tso;
937 s->tx_csum += tx->tx_cso;
938 s->rx_csum += rx->stats.rx_cso;
939 s->vlan_ex += rx->stats.vlan_ex;
940 s->vlan_ins += tx->vlan_ins;
941 }
942}
943
/*
 * ethtool: fill the statistics buffer.  Hardware MAC counters are written
 * first, directly as a struct port_stats; the software per-queue counters
 * follow.  The combined layout must match stats_strings above.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	/* advance past the MAC counters; assumes port_stats is an array of
	 * u64-sized fields — the cast above relies on the same layout */
	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
955
956/*
957 * Return a version number to identify the type of adapter. The scheme is:
958 * - bits 0..9: chip version
959 * - bits 10..15: chip revision
960 */
961static inline unsigned int mk_adap_vers(const struct adapter *ap)
962{
963 return 4 | (ap->params.rev << 10);
964}
965
966static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
967 unsigned int end)
968{
969 u32 *p = buf + start;
970
971 for ( ; start <= end; start += sizeof(u32))
972 *p++ = t4_read_reg(ap, start);
973}
974
/*
 * ethtool: dump the adapter's registers.  reg_ranges lists inclusive
 * [start, end] address pairs; each range is copied into the output buffer
 * at its own register offset, unlisted holes stay zero.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	/* pairs of (first, last) register addresses to dump */
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e240, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e640, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea40, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee40, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f240, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f640, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa40, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe40, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	/* zero first so unlisted register holes read back as 0 */
	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
1205
1206static int restart_autoneg(struct net_device *dev)
1207{
1208 struct port_info *p = netdev_priv(dev);
1209
1210 if (!netif_running(dev))
1211 return -EAGAIN;
1212 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1213 return -EINVAL;
1214 t4_restart_aneg(p->adapter, 0, p->tx_chan);
1215 return 0;
1216}
1217
1218static int identify_port(struct net_device *dev, u32 data)
1219{
1220 if (data == 0)
1221 data = 2; /* default to 2 seconds */
1222
1223 return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
1224 data * 5);
1225}
1226
1227static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1228{
1229 unsigned int v = 0;
1230
1231 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
1232 v |= SUPPORTED_TP;
1233 if (caps & FW_PORT_CAP_SPEED_100M)
1234 v |= SUPPORTED_100baseT_Full;
1235 if (caps & FW_PORT_CAP_SPEED_1G)
1236 v |= SUPPORTED_1000baseT_Full;
1237 if (caps & FW_PORT_CAP_SPEED_10G)
1238 v |= SUPPORTED_10000baseT_Full;
1239 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1240 v |= SUPPORTED_Backplane;
1241 if (caps & FW_PORT_CAP_SPEED_1G)
1242 v |= SUPPORTED_1000baseKX_Full;
1243 if (caps & FW_PORT_CAP_SPEED_10G)
1244 v |= SUPPORTED_10000baseKX4_Full;
1245 } else if (type == FW_PORT_TYPE_KR)
1246 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1247 else if (type == FW_PORT_TYPE_FIBER)
1248 v |= SUPPORTED_FIBRE;
1249
1250 if (caps & FW_PORT_CAP_ANEG)
1251 v |= SUPPORTED_Autoneg;
1252 return v;
1253}
1254
1255static unsigned int to_fw_linkcaps(unsigned int caps)
1256{
1257 unsigned int v = 0;
1258
1259 if (caps & ADVERTISED_100baseT_Full)
1260 v |= FW_PORT_CAP_SPEED_100M;
1261 if (caps & ADVERTISED_1000baseT_Full)
1262 v |= FW_PORT_CAP_SPEED_1G;
1263 if (caps & ADVERTISED_10000baseT_Full)
1264 v |= FW_PORT_CAP_SPEED_10G;
1265 return v;
1266}
1267
1268static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1269{
1270 const struct port_info *p = netdev_priv(dev);
1271
1272 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1273 p->port_type == FW_PORT_TYPE_BT_XAUI)
1274 cmd->port = PORT_TP;
1275 else if (p->port_type == FW_PORT_TYPE_FIBER)
1276 cmd->port = PORT_FIBRE;
1277 else if (p->port_type == FW_PORT_TYPE_TWINAX)
1278 cmd->port = PORT_DA;
1279 else
1280 cmd->port = PORT_OTHER;
1281
1282 if (p->mdio_addr >= 0) {
1283 cmd->phy_address = p->mdio_addr;
1284 cmd->transceiver = XCVR_EXTERNAL;
1285 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1286 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1287 } else {
1288 cmd->phy_address = 0; /* not really, but no better option */
1289 cmd->transceiver = XCVR_INTERNAL;
1290 cmd->mdio_support = 0;
1291 }
1292
1293 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1294 cmd->advertising = from_fw_linkcaps(p->port_type,
1295 p->link_cfg.advertising);
1296 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1297 cmd->duplex = DUPLEX_FULL;
1298 cmd->autoneg = p->link_cfg.autoneg;
1299 cmd->maxtxpkt = 0;
1300 cmd->maxrxpkt = 0;
1301 return 0;
1302}
1303
1304static unsigned int speed_to_caps(int speed)
1305{
1306 if (speed == SPEED_100)
1307 return FW_PORT_CAP_SPEED_100M;
1308 if (speed == SPEED_1000)
1309 return FW_PORT_CAP_SPEED_1G;
1310 if (speed == SPEED_10000)
1311 return FW_PORT_CAP_SPEED_10G;
1312 return 0;
1313}
1314
/*
 * ethtool: change the port's link settings.  Only full duplex is
 * supported.  New settings are pushed to the hardware immediately if the
 * interface is up, otherwise they take effect at the next link start.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed. See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(cmd->speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(cmd->speed);

		/* NOTE(review): forcing a fixed speed is rejected for 1G and
		 * 10G here, so autoneg-off is effectively 100M-only —
		 * presumably a firmware/hardware restriction; confirm against
		 * the T4 firmware interface. */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
		    cmd->speed == SPEED_10000)
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
	return 0;
}
1356
1357static void get_pauseparam(struct net_device *dev,
1358 struct ethtool_pauseparam *epause)
1359{
1360 struct port_info *p = netdev_priv(dev);
1361
1362 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1363 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1364 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1365}
1366
1367static int set_pauseparam(struct net_device *dev,
1368 struct ethtool_pauseparam *epause)
1369{
1370 struct port_info *p = netdev_priv(dev);
1371 struct link_config *lc = &p->link_cfg;
1372
1373 if (epause->autoneg == AUTONEG_DISABLE)
1374 lc->requested_fc = 0;
1375 else if (lc->supported & FW_PORT_CAP_ANEG)
1376 lc->requested_fc = PAUSE_AUTONEG;
1377 else
1378 return -EINVAL;
1379
1380 if (epause->rx_pause)
1381 lc->requested_fc |= PAUSE_RX;
1382 if (epause->tx_pause)
1383 lc->requested_fc |= PAUSE_TX;
1384 if (netif_running(dev))
1385 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1386 return 0;
1387}
1388
/* ethtool: nonzero if Rx checksum offload is enabled for this port. */
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & RX_CSO;
}
1395
1396static int set_rx_csum(struct net_device *dev, u32 data)
1397{
1398 struct port_info *p = netdev_priv(dev);
1399
1400 if (data)
1401 p->rx_offload |= RX_CSO;
1402 else
1403 p->rx_offload &= ~RX_CSO;
1404 return 0;
1405}
1406
/*
 * ethtool: report ring sizes of the port's first queue set.  rx maps to
 * the free list, rx_mini to the response queue, jumbo is unused.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	/* the -8 hides internal free-list descriptors from the user;
	 * set_sge_param() adds the same 8 back */
	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
1422
/*
 * ethtool: resize the rings of all of the port's queue sets.  Only
 * allowed before the queues have been brought up (FULL_INIT_DONE).
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	/* reject unsupported jumbo ring and out-of-range sizes */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	/* +8 restores the internal free-list descriptors that
	 * get_sge_param() subtracts */
	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
1447
1448static int closest_timer(const struct sge *s, int time)
1449{
1450 int i, delta, match = 0, min_delta = INT_MAX;
1451
1452 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1453 delta = time - s->timer_val[i];
1454 if (delta < 0)
1455 delta = -delta;
1456 if (delta < min_delta) {
1457 min_delta = delta;
1458 match = i;
1459 }
1460 }
1461 return match;
1462}
1463
1464static int closest_thres(const struct sge *s, int thres)
1465{
1466 int i, delta, match = 0, min_delta = INT_MAX;
1467
1468 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1469 delta = thres - s->counter_val[i];
1470 if (delta < 0)
1471 delta = -delta;
1472 if (delta < min_delta) {
1473 min_delta = delta;
1474 match = i;
1475 }
1476 }
1477 return match;
1478}
1479
1480/*
1481 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1482 */
1483static unsigned int qtimer_val(const struct adapter *adap,
1484 const struct sge_rspq *q)
1485{
1486 unsigned int idx = q->intr_params >> 1;
1487
1488 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1489}
1490
1491/**
1492 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1493 * @adap: the adapter
1494 * @q: the Rx queue
1495 * @us: the hold-off time in us, or 0 to disable timer
1496 * @cnt: the hold-off packet count, or 0 to disable counter
1497 *
1498 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1499 * one of the two needs to be enabled for the queue to generate interrupts.
1500 */
1501static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1502 unsigned int us, unsigned int cnt)
1503{
1504 if ((us | cnt) == 0)
1505 cnt = 1;
1506
1507 if (cnt) {
1508 int err;
1509 u32 v, new_idx;
1510
1511 new_idx = closest_thres(&adap->sge, cnt);
1512 if (q->desc && q->pktcnt_idx != new_idx) {
1513 /* the queue has already been created, update it */
1514 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1515 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1516 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1517 err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
1518 if (err)
1519 return err;
1520 }
1521 q->pktcnt_idx = new_idx;
1522 }
1523
1524 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1525 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1526 return 0;
1527}
1528
1529static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1530{
1531 const struct port_info *pi = netdev_priv(dev);
1532 struct adapter *adap = pi->adapter;
1533
1534 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1535 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1536}
1537
1538static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1539{
1540 const struct port_info *pi = netdev_priv(dev);
1541 const struct adapter *adap = pi->adapter;
1542 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1543
1544 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1545 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1546 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1547 return 0;
1548}
1549
1550/*
1551 * Translate a physical EEPROM address to virtual. The first 1K is accessed
1552 * through virtual addresses starting at 31K, the rest is accessed through
1553 * virtual addresses starting at 0. This mapping is correct only for PF0.
1554 */
1555static int eeprom_ptov(unsigned int phys_addr)
1556{
1557 if (phys_addr < 1024)
1558 return phys_addr + (31 << 10);
1559 if (phys_addr < EEPROMSIZE)
1560 return phys_addr - 1024;
1561 return -EINVAL;
1562}
1563
1564/*
1565 * The next two routines implement eeprom read/write from physical addresses.
1566 * The physical->virtual translation is correct only for PF0.
1567 */
1568static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1569{
1570 int vaddr = eeprom_ptov(phys_addr);
1571
1572 if (vaddr >= 0)
1573 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1574 return vaddr < 0 ? vaddr : 0;
1575}
1576
1577static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1578{
1579 int vaddr = eeprom_ptov(phys_addr);
1580
1581 if (vaddr >= 0)
1582 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1583 return vaddr < 0 ? vaddr : 0;
1584}
1585
/* magic cookie identifying cxgb4 EEPROM contents to ethtool */
#define EEPROM_MAGIC 0x38E2F10C

/*
 * ethtool: read a window of the serial EEPROM.  VPD access is 4 bytes at
 * a time, so the 32-bit-aligned span covering the request is read into a
 * staging buffer and the exact window copied out.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	/* buf is indexed by physical EEPROM address */
	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
1607
/*
 * ethtool: write a window of the serial EEPROM.  Writes are done in
 * 32-bit words; an unaligned request is widened to word boundaries with a
 * read-modify-write of the first and last words.  Write protection is
 * lifted for the duration and restored afterwards.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* widen [offset, offset+len) to 32-bit word boundaries */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* pre-read the first and (if distinct) last words, then
		 * overlay the user data in between */
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already aligned; write user data directly */

	/* disable write protection while we program the EEPROM */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
1656
1657static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1658{
1659 int ret;
1660 const struct firmware *fw;
1661 struct adapter *adap = netdev2adap(netdev);
1662
1663 ef->data[sizeof(ef->data) - 1] = '\0';
1664 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1665 if (ret < 0)
1666 return ret;
1667
1668 ret = t4_load_fw(adap, fw->data, fw->size);
1669 release_firmware(fw);
1670 if (!ret)
1671 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1672 return ret;
1673}
1674
1675#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1676#define BCAST_CRC 0xa0ccc1a6
1677
1678static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1679{
1680 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1681 wol->wolopts = netdev2adap(dev)->wol;
1682 memset(&wol->sopass, 0, sizeof(wol->sopass));
1683}
1684
/*
 * ethtool: configure Wake-on-LAN.  Magic-packet wake is armed with the
 * device MAC address; broadcast wake is implemented via two hardware
 * pattern-match rules.
 */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	/* NULL address disables magic-packet wake */
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		/* two pattern rules — NOTE(review): the masks/values here
		 * (0xfe/~0ULL and 1/~6ULL with BCAST_CRC) encode a broadcast
		 * match; confirm against t4_wol_pat_enable() semantics */
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
1704
1705static int set_tso(struct net_device *dev, u32 value)
1706{
1707 if (value)
1708 dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1709 else
1710 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1711 return 0;
1712}
1713
1714static struct ethtool_ops cxgb_ethtool_ops = {
1715 .get_settings = get_settings,
1716 .set_settings = set_settings,
1717 .get_drvinfo = get_drvinfo,
1718 .get_msglevel = get_msglevel,
1719 .set_msglevel = set_msglevel,
1720 .get_ringparam = get_sge_param,
1721 .set_ringparam = set_sge_param,
1722 .get_coalesce = get_coalesce,
1723 .set_coalesce = set_coalesce,
1724 .get_eeprom_len = get_eeprom_len,
1725 .get_eeprom = get_eeprom,
1726 .set_eeprom = set_eeprom,
1727 .get_pauseparam = get_pauseparam,
1728 .set_pauseparam = set_pauseparam,
1729 .get_rx_csum = get_rx_csum,
1730 .set_rx_csum = set_rx_csum,
1731 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1732 .set_sg = ethtool_op_set_sg,
1733 .get_link = ethtool_op_get_link,
1734 .get_strings = get_strings,
1735 .phys_id = identify_port,
1736 .nway_reset = restart_autoneg,
1737 .get_sset_count = get_sset_count,
1738 .get_ethtool_stats = get_stats,
1739 .get_regs_len = get_regs_len,
1740 .get_regs = get_regs,
1741 .get_wol = get_wol,
1742 .set_wol = set_wol,
1743 .set_tso = set_tso,
1744 .flash_device = set_flash,
1745};
1746
1747/*
1748 * debugfs support
1749 */
1750
/* debugfs open: stash the inode's private pointer (adapter + memory tag,
 * see add_debugfs_mem()) on the file for mem_read(). */
static int mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
1756
/*
 * debugfs read of adapter memory (EDC0/EDC1/MC).  The low 2 bits of
 * file->private_data select which memory to read (see add_debugfs_mem());
 * the remaining bits are the adapter pointer.  The file's i_size bounds
 * the readable range.
 */
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		/* MC and EDC use different hardware read routines */
		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		/* copy out only the requested window of each 64-byte chunk */
		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;	/* bytes actually delivered */
	*ppos = pos;
	return count;
}
1797
/* file operations for the debugfs adapter-memory dump files */
static const struct file_operations mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = mem_open,
	.read    = mem_read,
};
1803
/*
 * Create a debugfs file exposing one of the adapter's memories.  The
 * memory index is encoded in the low bits of the file's private pointer
 * ((void *)adap + idx), which mem_read() decodes with "& 3".  The inode
 * size is set to the memory size so reads are bounded.
 */
static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
				      unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}
1814
/*
 * Populate the adapter's debugfs directory with dump files for each
 * enabled memory and, if offload is active, the L2 table.  Returns 0 on
 * success, -1 if the debugfs root is unavailable.
 */
static int __devinit setup_debugfs(struct adapter *adap)
{
	int i;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	/* expose only the memories the hardware has enabled */
	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);	/* 5 MB — assumed fixed EDC size; confirm */
	if (i & EDRAM1_ENABLE)
		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		add_debugfs_mem(adap, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}
1835
1836/*
1837 * upper-layer driver support
1838 */
1839
1840/*
1841 * Allocate an active-open TID and set it to the supplied value.
1842 */
1843int cxgb4_alloc_atid(struct tid_info *t, void *data)
1844{
1845 int atid = -1;
1846
1847 spin_lock_bh(&t->atid_lock);
1848 if (t->afree) {
1849 union aopen_entry *p = t->afree;
1850
1851 atid = p - t->atid_tab;
1852 t->afree = p->next;
1853 p->data = data;
1854 t->atids_in_use++;
1855 }
1856 spin_unlock_bh(&t->atid_lock);
1857 return atid;
1858}
1859EXPORT_SYMBOL(cxgb4_alloc_atid);
1860
1861/*
1862 * Release an active-open TID.
1863 */
1864void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1865{
1866 union aopen_entry *p = &t->atid_tab[atid];
1867
1868 spin_lock_bh(&t->atid_lock);
1869 p->next = t->afree;
1870 t->afree = p;
1871 t->atids_in_use--;
1872 spin_unlock_bh(&t->atid_lock);
1873}
1874EXPORT_SYMBOL(cxgb4_free_atid);
1875
1876/*
1877 * Allocate a server TID and set it to the supplied value.
1878 */
1879int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1880{
1881 int stid;
1882
1883 spin_lock_bh(&t->stid_lock);
1884 if (family == PF_INET) {
1885 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1886 if (stid < t->nstids)
1887 __set_bit(stid, t->stid_bmap);
1888 else
1889 stid = -1;
1890 } else {
1891 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1892 if (stid < 0)
1893 stid = -1;
1894 }
1895 if (stid >= 0) {
1896 t->stid_tab[stid].data = data;
1897 stid += t->stid_base;
1898 t->stids_in_use++;
1899 }
1900 spin_unlock_bh(&t->stid_lock);
1901 return stid;
1902}
1903EXPORT_SYMBOL(cxgb4_alloc_stid);
1904
1905/*
1906 * Release a server TID.
1907 */
1908void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1909{
1910 stid -= t->stid_base;
1911 spin_lock_bh(&t->stid_lock);
1912 if (family == PF_INET)
1913 __clear_bit(stid, t->stid_bmap);
1914 else
1915 bitmap_release_region(t->stid_bmap, stid, 2);
1916 t->stid_tab[stid].data = NULL;
1917 t->stids_in_use--;
1918 spin_unlock_bh(&t->stid_lock);
1919}
1920EXPORT_SYMBOL(cxgb4_free_stid);
1921
1922/*
1923 * Populate a TID_RELEASE WR. Caller must properly size the skb.
1924 */
1925static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1926 unsigned int tid)
1927{
1928 struct cpl_tid_release *req;
1929
1930 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1931 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1932 INIT_TP_WR(req, tid);
1933 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1934}
1935
1936/*
1937 * Queue a TID release request and if necessary schedule a work queue to
1938 * process it.
1939 */
1940void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1941 unsigned int tid)
1942{
1943 void **p = &t->tid_tab[tid];
1944 struct adapter *adap = container_of(t, struct adapter, tids);
1945
1946 spin_lock_bh(&adap->tid_release_lock);
1947 *p = adap->tid_release_head;
1948 /* Low 2 bits encode the Tx channel number */
1949 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1950 if (!adap->tid_release_task_busy) {
1951 adap->tid_release_task_busy = true;
1952 schedule_work(&adap->tid_release_task);
1953 }
1954 spin_unlock_bh(&adap->tid_release_lock);
1955}
1956EXPORT_SYMBOL(cxgb4_queue_tid_release);
1957
1958/*
1959 * Process the list of pending TID release requests.
1960 */
1961static void process_tid_release_list(struct work_struct *work)
1962{
1963 struct sk_buff *skb;
1964 struct adapter *adap;
1965
1966 adap = container_of(work, struct adapter, tid_release_task);
1967
1968 spin_lock_bh(&adap->tid_release_lock);
1969 while (adap->tid_release_head) {
1970 void **p = adap->tid_release_head;
1971 unsigned int chan = (uintptr_t)p & 3;
1972 p = (void *)p - chan;
1973
1974 adap->tid_release_head = *p;
1975 *p = NULL;
1976 spin_unlock_bh(&adap->tid_release_lock);
1977
1978 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1979 GFP_KERNEL)))
1980 schedule_timeout_uninterruptible(1);
1981
1982 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1983 t4_ofld_send(adap, skb);
1984 spin_lock_bh(&adap->tid_release_lock);
1985 }
1986 adap->tid_release_task_busy = false;
1987 spin_unlock_bh(&adap->tid_release_lock);
1988}
1989
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 *
 * On the fast path the table slot is cleared and the CPL_TID_RELEASE is
 * sent immediately; otherwise cxgb4_queue_tid_release() reuses the slot
 * itself as a deferred-list link.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	/* only decrement if the slot was actually occupied */
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
2012
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 *
 * One contiguous allocation backs tid_tab, atid_tab, stid_tab and the
 * stid bitmap; the assignments below carve that single region up, so the
 * whole thing is owned (and eventually freed) through t->tid_tab.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		/* link entry i-1 -> i for i = 1..natids-1; the last entry's
		 * next is left as allocated (presumably zeroed by
		 * t4_alloc_mem — TODO confirm) to terminate the list.
		 */
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}
2048
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.  Builds a
 *	CPL_PASS_OPEN_REQ with a wildcard peer (port 0, address 0) and
 *	submits it on the management queue.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	/* derive the Tx channel from the port owning the target Rx queue */
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);
2087
/**
 *	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.  IPv6
 *	counterpart of cxgb4_create_server(): builds a CPL_PASS_OPEN_REQ6
 *	with a wildcard peer and submits it on the management queue.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);	/* wildcard peer */
	/* the 128-bit address is passed as two raw big-endian 64-bit halves */
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	/* derive the Tx channel from the port owning the target Rx queue */
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server6);
2129
2130/**
2131 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2132 * @mtus: the HW MTU table
2133 * @mtu: the target MTU
2134 * @idx: index of selected entry in the MTU table
2135 *
2136 * Returns the index and the value in the HW MTU table that is closest to
2137 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2138 * table, in which case that smallest available value is selected.
2139 */
2140unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2141 unsigned int *idx)
2142{
2143 unsigned int i = 0;
2144
2145 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2146 ++i;
2147 if (idx)
2148 *idx = i;
2149 return mtus[i];
2150}
2151EXPORT_SYMBOL(cxgb4_best_mtu);
2152
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
2164
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);
2176
/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
2188
/**
 *	cxgb4_netdev_by_hwid - return the net device of a HW port
 *	@pdev: identifies the adapter
 *	@id: the HW port id
 *
 *	Return the net device associated with the interface with the given HW
 *	id, or %NULL if @id is out of range or no port is mapped to it.
 */
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
{
	const struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap || id >= NCHAN)
		return NULL;
	/* translate the HW channel id to a port index via chan_map */
	id = adap->chan_map[id];
	return id < MAX_NPORTS ? adap->port[id] : NULL;
}
EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2207
/*
 * Read the adapter's TCP MIB counters into @v4 and @v6.  stats_lock
 * serializes access to the shared statistics registers.
 */
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2218
/*
 * Program the ULP_RX iSCSI tag mask and the four page-size order fields
 * used for iSCSI DDP.
 */
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
2230
static struct pci_driver cxgb4_driver;

/*
 * Propagate a neighbour update to the L2T if the affected interface (or,
 * for a VLAN, its underlying real device) belongs to this driver.
 */
static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	/* resolve VLAN interfaces to the real device beneath them */
	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	/* only act on devices bound to this driver */
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
2244
2245static int netevent_cb(struct notifier_block *nb, unsigned long event,
2246 void *data)
2247{
2248 switch (event) {
2249 case NETEVENT_NEIGH_UPDATE:
2250 check_neigh_update(data);
2251 break;
2252 case NETEVENT_PMTU_UPDATE:
2253 case NETEVENT_REDIRECT:
2254 default:
2255 break;
2256 }
2257 return 0;
2258}
2259
/* set once the netevent notifier has been registered (guarded by uld_mutex
 * in all users below)
 */
static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

/*
 * Attach an adapter to the ULD of the given type: snapshot the lower-level
 * driver info the ULD needs, call its add() method, and record the handle.
 * Both callers (attach_ulds and cxgb4_register_uld) hold uld_mutex.
 */
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	/* per-ULD Rx queue set; NOTE(review): for ULD types other than RDMA
	 * and ISCSI, rxq_ids/nrxq are left uninitialized here — confirm no
	 * such type exists before extending CXGB4_ULD_MAX.
	 */
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	/* register the netevent notifier once, on the first attach */
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
2312
/*
 * Add the adapter to the global adapter list and attach it to every
 * currently registered ULD.
 */
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)	/* only ULD types actually registered */
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
2324
/*
 * Remove the adapter from the global list and notify each attached ULD
 * of the detach.  Unregisters the netevent notifier once the last adapter
 * is gone.
 */
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}
2343
/*
 * Notify every attached ULD of an adapter state change.
 */
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
2354
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered and %-EINVAL
 *	for an out-of-range type.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {	/* a non-NULL add() marks the slot in use */
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	/* attach every existing adapter to the newly registered ULD */
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
2383
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver: drops every adapter's
 *	handle for it and clears its registration slot.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;	/* NULL add() frees the slot for reuse */
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
2404
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	/* one-time queue/RSS setup, kept across later up/down cycles */
	if (!(adap->flags & FULL_INIT_DONE)) {
		err = setup_sge_queues(adap);
		if (err)
			goto out;
		err = setup_rss(adap);
		if (err) {
			t4_free_sge_resources(adap);
			goto out;
		}
		if (adap->flags & USING_MSIX)
			name_msix_vecs(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	if (adap->flags & USING_MSIX) {
		/* vector 0 carries non-data interrupts; the rest are
		 * per-queue and requested separately below
		 */
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		/* single shared interrupt for MSI/INTx */
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
	goto out;
}
2461
/*
 * Disable the adapter: mask interrupts, flush the deferred TID-release
 * work, free the IRQs and quiesce the Rx path.  Reverse of cxgb_up().
 */
static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	adapter->tid_release_task_busy = false;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
}
2475
2476/*
2477 * net_device operations
2478 */
2479static int cxgb_open(struct net_device *dev)
2480{
2481 int err;
2482 struct port_info *pi = netdev_priv(dev);
2483 struct adapter *adapter = pi->adapter;
2484
2485 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
2486 return err;
2487
2488 dev->real_num_tx_queues = pi->nqsets;
2489 set_bit(pi->tx_chan, &adapter->open_device_map);
2490 link_start(dev);
2491 netif_tx_start_all_queues(dev);
2492 return 0;
2493}
2494
2495static int cxgb_close(struct net_device *dev)
2496{
2497 int ret;
2498 struct port_info *pi = netdev_priv(dev);
2499 struct adapter *adapter = pi->adapter;
2500
2501 netif_tx_stop_all_queues(dev);
2502 netif_carrier_off(dev);
2503 ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
2504
2505 clear_bit(pi->tx_chan, &adapter->open_device_map);
2506
2507 if (!adapter->open_device_map)
2508 cxgb_down(adapter);
2509 return 0;
2510}
2511
/*
 * ndo_get_stats: translate the per-port HW MAC statistics into the
 * generic net_device_stats counters.
 */
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;
	struct net_device_stats *ns = &dev->stats;

	/* stats_lock serializes access to the shared statistics registers */
	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	/* sum the per-channel overflow and truncation counters */
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors: no HW counters are mapped to these */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
2553
/*
 * ndo_do_ioctl: handle the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG,
 * SIOCSMIIREG) by translating them to MDIO reads/writes, supporting
 * both clause-22 and clause-45 PHY addressing.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)	/* port has no MDIO-managed PHY */
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			/* clause 45: phy_id encodes port and device address */
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			/* clause 22: plain 5-bit PHY address */
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
2590
/*
 * ndo_set_rx_mode: resync HW Rx filtering with the stack's view
 * (-1 leaves the MTU unchanged).
 */
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
2596
/*
 * ndo_change_mtu: validate the requested MTU against the HW limits and
 * program it into the VI's Rx mode.  dev->mtu is only updated when the
 * firmware accepts the change.
 */
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)	/* accommodate SACK */
		return -EINVAL;
	/* -1 arguments leave promiscuity/multicast/broadcast unchanged */
	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
			    true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
2610
/*
 * ndo_set_mac_address: program the new unicast address into the VI's
 * exact-match filter before committing it to dev->dev_addr.
 */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
			    addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	/* t4_change_mac returns the index of the installed filter */
	pi->xact_addr_filt = ret;
	return 0;
}
2629
/*
 * ndo_vlan_rx_register: record the VLAN group and toggle HW VLAN
 * acceleration for this port's channel accordingly.
 */
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
}
2637
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * ndo_poll_controller: service this port's Rx queues with interrupts
 * disabled, invoking either the per-queue MSI-X handlers or the shared
 * MSI/INTx handler directly.
 */
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
2654
/* net_device method table shared by all ports of the adapter */
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats        = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
2670
/*
 * Handle a fatal HW error: stop the SGE globally, mask interrupts and
 * log the condition.  The adapter is left stopped.
 */
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
2677
/*
 * Program the three PCIe memory windows with their BAR0-relative base
 * addresses and aperture sizes (WINDOW encodes log2(aperture) - 10).
 */
static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
2693
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * Sequence: verify/upgrade firmware, say hello and become master, reset
 * the device, negotiate capabilities, configure RSS and PF resources,
 * initialize the SGE, then query the parameters that size the TID/ATID/
 * STID tables and the offload/RDMA/iSCSI memory regions.  On any failure
 * after the hello, jump to 'bye' which politely disconnects from FW
 * (unless the error suggests FW/HW is unusable).
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0) /* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	/* get device capabilities */
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.retval_len16 = htonl(FW_LEN16(c));
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
	if (ret < 0)
		goto bye;

	/* select capabilities we'll be using */
	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		/* module parameter requested a capability FW doesn't offer */
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	/* write the (possibly trimmed) capability selection back to FW */
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
	if (ret < 0)
		goto bye;

	ret = t4_config_glbl_rss(adap, 0,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		goto bye;

	/* set this PF's resource limits (queues, interrupts, filters, ...) */
	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		goto bye;

	/* fill the SGE holdoff timer and packet-count thresholds, capping
	 * the module-parameter values to what the HW fields can hold
	 */
	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);
	t4_sge_init(adap);

	/* get basic stuff going */
	ret = t4_early_init(adap, 0);
	if (ret < 0)
		goto bye;

#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	/* query the port vector and filter TID range */
	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;

	if (c.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		/* use at most half the TIDs as active-open TIDs, capped */
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}
	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
	setup_memwin(adap);
	return 0;

	/*
	 * If a command timed out or failed with EIO FW does not operate within
	 * its spec or something catastrophic happened to HW/FW, stop issuing
	 * commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, 0);
	return ret;
}
2880
/* true if the link's capability mask includes 10G speed */
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
2885
/*
 * Fill in the software parameters of a response queue: interrupt holdoff
 * timer index, packet-count threshold index (disabled when out of range),
 * queue size and entry size.
 */
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
2895
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	/* count 10G-capable ports */
	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	/* assign each port a contiguous range of queue sets */
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	/* default sizes for the various queue types */
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	/* FW event queue and (for non-MSI-X) the interrupt forwarding queue */
	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
2980
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 *
 * Queues are taken away round-robin, one per port per pass, so the
 * reduction stays roughly even across ports; first_qset values are then
 * recomputed to keep each port's range contiguous.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
3008
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

/*
 * Try to enable MSI-X with enough vectors for all queue groups, retrying
 * with whatever the HW offers down to the minimum 'need'.  On success the
 * available vectors are distributed: every group gets its minimum and the
 * NIC queues get the leftovers.  Returns 0 on success, the remaining
 * vector count (>0) or a negative errno on failure, per the old
 * pci_enable_msix() contract.
 */
static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	/* a positive return is the number of vectors actually available;
	 * keep retrying with that count while it still covers 'need'
	 */
	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS
3060
3061static void __devinit print_port_info(struct adapter *adap)
3062{
3063 static const char *base[] = {
3064 "R", "KX4", "T", "KX", "T", "KR", "CX4"
3065 };
3066
3067 int i;
3068 char buf[80];
3069
3070 for_each_port(adap, i) {
3071 struct net_device *dev = adap->port[i];
3072 const struct port_info *pi = netdev_priv(dev);
3073 char *bufp = buf;
3074
3075 if (!test_bit(i, &adap->registered_device_map))
3076 continue;
3077
3078 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3079 bufp += sprintf(bufp, "100/");
3080 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3081 bufp += sprintf(bufp, "1000/");
3082 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3083 bufp += sprintf(bufp, "10G/");
3084 if (bufp != buf)
3085 --bufp;
3086 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3087
3088 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
3089 adap->params.vpd.id, adap->params.rev,
3090 buf, is_offload(adap) ? "R" : "",
3091 adap->params.pci.width,
3092 (adap->flags & USING_MSIX) ? " MSI-X" :
3093 (adap->flags & USING_MSI) ? " MSI" : "");
3094 if (adap->name == dev->name)
3095 netdev_info(dev, "S/N: %s, E/C: %s\n",
3096 adap->params.vpd.sn, adap->params.vpd.ec);
3097 }
3098}
3099
/* Feature flags propagated from a port device to its VLAN devices. */
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

/*
 * PCI probe routine.  Physical function 0 gets the full adapter bring-up
 * (DMA setup, firmware init, net devices, queues, debugfs, interrupts,
 * ULD attach); other PFs only claim their regions and optionally enable
 * SR-IOV virtual functions.  Returns 0 on success or a negative errno.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through PF 0 */
	func = PCI_FUNC(pdev->devfn);
	if (func > 0)
		goto sriov;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* Map BAR 0, which holds the adapter's register space. */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	/* 0xff marks every channel as having no associated port yet. */
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	/* Basic HW prep, then firmware handshake / configuration. */
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	/* Allocate and initialize a net device for each port. */
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;	/* no exact MAC filter yet */
		pi->rx_offload = RX_CSO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, 0, 0, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Debugfs is optional; failures here are silently tolerated. */
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	/* Optionally spawn the module-parameter-requested VFs for this PF. */
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
 out_free_dev:
	t4_free_mem(adapter->tids.tid_tab);
	t4_free_mem(adapter->l2t);
	for_each_port(adapter, i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, 0);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
3317
/*
 * PCI remove routine: undo init_one() in reverse order.  For PFs other
 * than 0 (no adapter attached) only the PCI regions are released.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		/* Detach ULDs first so no offload traffic is in flight. */
		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* Quiesce and free SGE resources before the tables they use. */
		t4_sge_stop(adapter);
		t4_free_sge_resources(adapter);
		t4_free_mem(adapter->l2t);
		t4_free_mem(adapter->tids.tid_tab);
		disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		/* Tell the firmware we're done with this PF. */
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, 0);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		pci_release_regions(pdev);
}
3358
/* PCI driver glue binding the probe/remove callbacks to the device table. */
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
3365
3366static int __init cxgb4_init_module(void)
3367{
3368 int ret;
3369
3370 /* Debugfs support is optional, just warn if this fails */
3371 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3372 if (!cxgb4_debugfs_root)
3373 pr_warning("could not create debugfs entry, continuing\n");
3374
3375 ret = pci_register_driver(&cxgb4_driver);
3376 if (ret < 0)
3377 debugfs_remove(cxgb4_debugfs_root);
3378 return ret;
3379}
3380
/*
 * Module exit point.  The driver is unregistered first so all per-adapter
 * debugfs directories are gone before the root directory is removed.
 */
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}
3386
3387module_init(cxgb4_init_module);
3388module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 000000000000..5b98546ac92d
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_OFLD_H
36#define __CXGB4_OFLD_H
37
38#include <linux/cache.h>
39#include <linux/spinlock.h>
40#include <linux/skbuff.h>
41#include <asm/atomic.h>
42
/* CPL message priority levels */
/*
 * Note that several logical priorities share a numeric level: the values
 * select which control/data Tx queue a message is sent on (see
 * set_wr_txq()), not a strict ordering.
 */
enum {
	CPL_PRIORITY_DATA     = 0,  /* data messages */
	CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
	CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
	CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
	CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
	CPL_PRIORITY_CONTROL  = 1   /* control messages */
};
52
/*
 * Initialize the common work-request header of a TP work request @w,
 * embedding the full request immediately and addressing flow @tid.
 */
#define INIT_TP_WR(w, tid) do { \
	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
			      FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
			       FW_WR_FLOWID(tid)); \
	(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)

/* As INIT_TP_WR but also fill in the CPL opcode/TID field. */
#define INIT_TP_WR_CPL(w, cpl, tid) do { \
	INIT_TP_WR(w, tid); \
	OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)

/*
 * Initialize the header of a ULP_TX work request of @wrlen bytes;
 * @atomic marks the WR as needing atomic execution by the chip.
 */
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
			       FW_WR_FLOWID(tid)); \
	(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)

/* Special asynchronous notification message */
#define CXGB4_MSG_AN ((void *)1)
75
/* A server (listening) TID table slot: just the user's opaque data. */
struct serv_entry {
	void *data;
};

/*
 * An active-open TID table slot: holds user data while allocated, or
 * links to the next free slot when on the free list.
 */
union aopen_entry {
	void *data;
	union aopen_entry *next;
};
84
/*
 * Holds the size, base address, free list start, etc of the TID, server TID,
 * and active-open TID tables.  The tables themselves are allocated dynamically.
 */
struct tid_info {
	void **tid_tab;			/* main TID -> user data table */
	unsigned int ntids;		/* number of entries in tid_tab */

	struct serv_entry *stid_tab;	/* server TID table */
	unsigned long *stid_bmap;	/* allocation bitmap for stid_tab */
	unsigned int nstids;		/* number of server TIDs */
	unsigned int stid_base;		/* HW value of the first server TID */

	union aopen_entry *atid_tab;	/* active-open TID table */
	unsigned int natids;		/* number of active-open TIDs */

	unsigned int nftids;		/* number of filter TIDs */
	unsigned int ftid_base;		/* HW value of the first filter TID */

	/* each lock sits in its own cache line to avoid false sharing */
	spinlock_t atid_lock ____cacheline_aligned_in_smp;
	union aopen_entry *afree;	/* head of the free atid list */
	unsigned int atids_in_use;

	spinlock_t stid_lock;
	unsigned int stids_in_use;

	atomic_t tids_in_use;
};
113
114static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
115{
116 return tid < t->ntids ? t->tid_tab[tid] : NULL;
117}
118
119static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
120{
121 return atid < t->natids ? t->atid_tab[atid].data : NULL;
122}
123
124static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
125{
126 stid -= t->stid_base;
127 return stid < t->nstids ? t->stid_tab[stid].data : NULL;
128}
129
/*
 * Record @data as the user state for TID @tid and bump the in-use count.
 * The data is stored before the counter is incremented.
 */
static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
				    unsigned int tid)
{
	t->tid_tab[tid] = data;
	atomic_inc(&t->tids_in_use);
}
136
/* TID allocation/release entry points (implemented in cxgb4_main.c). */
int cxgb4_alloc_atid(struct tid_info *t, void *data);
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
			     unsigned int tid);

struct in6_addr;

/* Start an offloaded IPv4/IPv6 listening server on the given stid/queue. */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue);
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue);
152
/*
 * Select the Tx queue for an offload work request: bit 0 carries the
 * CPL priority, the remaining bits the queue index.
 */
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
	int mapping = (queue << 1) | prio;

	skb_set_queue_mapping(skb, mapping);
}
157
/* Identifiers for the upper-layer drivers that can attach to cxgb4. */
enum cxgb4_uld {
	CXGB4_ULD_RDMA,
	CXGB4_ULD_ISCSI,
	CXGB4_ULD_MAX
};

/* Adapter states reported to ULDs through the state_change callback. */
enum cxgb4_state {
	CXGB4_STATE_UP,
	CXGB4_STATE_START_RECOVERY,
	CXGB4_STATE_DOWN,
	CXGB4_STATE_DETACH
};
170
/* Forward declarations so ULDs need not pull in the full definitions. */
struct pci_dev;
struct l2t_data;
struct net_device;
struct pkt_gl;
struct tp_tcp_stats;

/* A contiguous [start, start + size) range of a hardware resource. */
struct cxgb4_range {
	unsigned int start;
	unsigned int size;
};

struct cxgb4_virt_res {                      /* virtualized HW resources */
	struct cxgb4_range ddp;
	struct cxgb4_range iscsi;
	struct cxgb4_range stag;
	struct cxgb4_range rq;
	struct cxgb4_range pbl;
};
189
/*
 * Block of information the LLD provides to ULDs attaching to a device.
 * All pointers reference LLD-owned data and remain valid while the ULD
 * is attached.
 */
struct cxgb4_lld_info {
	struct pci_dev *pdev;                /* associated PCI device */
	struct l2t_data *l2t;                /* L2 table */
	struct tid_info *tids;               /* TID table */
	struct net_device **ports;           /* device ports */
	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
	const unsigned short *mtus;          /* MTU table */
	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
	unsigned short nrxq;                 /* # of Rx queues */
	unsigned short ntxq;                 /* # of Tx queues */
	unsigned char nchan:4;               /* # of channels */
	unsigned char nports:4;              /* # of ports */
	unsigned char wr_cred;               /* WR 16-byte credits */
	unsigned char adapter_type;          /* type of adapter */
	unsigned char fw_api_ver;            /* FW API version */
	unsigned int fw_vers;                /* FW version */
	unsigned int iscsi_iolen;            /* iSCSI max I/O length */
	unsigned short udb_density;          /* # of user DB/page */
	unsigned short ucq_density;          /* # of user CQs/page */
	void __iomem *gts_reg;               /* address of GTS register */
	void __iomem *db_reg;                /* address of kernel doorbell */
};
215
/*
 * Callbacks a ULD registers with cxgb4_register_uld().  add() is invoked
 * once per adapter and returns the ULD handle passed back to the other
 * callbacks; rx_handler() receives ingress CPL messages, state_change()
 * adapter state transitions (enum cxgb4_state).
 */
struct cxgb4_uld_info {
	const char *name;
	void *(*add)(const struct cxgb4_lld_info *p);
	int (*rx_handler)(void *handle, const __be64 *rsp,
			  const struct pkt_gl *gl);
	int (*state_change)(void *handle, enum cxgb4_state new_state);
};
223
/* ULD (un)registration and the offload services exported to ULDs. */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
/* Per-port identity queries. */
unsigned int cxgb4_port_chan(const struct net_device *dev);
unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order);
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len);
#endif  /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 000000000000..9f96724a133a
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,624 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/if.h>
38#include <linux/if_vlan.h>
39#include <linux/jhash.h>
40#include <net/neighbour.h>
41#include "cxgb4.h"
42#include "l2t.h"
43#include "t4_msg.h"
44#include "t4fw_api.h"
45
/* VLAN id meaning "no VLAN tag" in an L2T entry. */
#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define F_SYNC_WR (1 << 12)

/* Lifecycle states of an L2T entry. */
enum {
	L2T_STATE_VALID,      /* entry is up to date */
	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,  /* entry needs address resolution */
	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */

	/* when state is one of the below the entry is not hashed */
	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
	L2T_STATE_UNUSED      /* entry not in use */
};
61
/* The software L2 table: a fixed array of entries plus allocation state. */
struct l2t_data {
	rwlock_t lock;                   /* protects the hash chains */
	atomic_t nfree;                  /* number of free entries */
	struct l2t_entry *rover;         /* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};
68
69static inline unsigned int vlan_prio(const struct l2t_entry *e)
70{
71 return e->vlan >> 13;
72}
73
/*
 * Take a reference on an L2T entry.  On the 0 -> 1 transition the entry
 * leaves the free pool, so the table's free count is decremented.
 */
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}
79
/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF   = L2T_SIZE / 2,   /* buckets per address family */
	L2T_HASH_MASK = L2T_SZ_HALF - 1 /* assumes L2T_SZ_HALF is a power of 2 */
};
89
90static inline unsigned int arp_hash(const u32 *key, int ifindex)
91{
92 return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
93}
94
95static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
96{
97 u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
98
99 return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
100}
101
102static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
103{
104 return addr_len == 4 ? arp_hash(addr, ifindex) :
105 ipv6_hash(addr, ifindex);
106}
107
/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match (XOR of equal words is 0).
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}
122
/*
 * Swap the neighbour attached to an L2T entry for @n.  The new neighbour
 * is held before the old one is released so a self-replace is safe.
 */
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}
130
/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	/*
	 * A sync write carries F_SYNC_WR in the TID so the completion
	 * (do_l2t_write_rpl) can tell it apart, and is steered to the FW
	 * event queue.
	 */
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					e->idx | (sync ? F_SYNC_WR : 0) |
					TID_QID(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	/* Refresh the cached MAC from the neighbour before writing it out. */
	if (e->neigh)
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	t4_ofld_send(adap, skb);

	/* Switching entries stay in their own state even on sync writes. */
	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}
164
/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	/* Pop skbs off the singly linked arpq list and hand them to the HW. */
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}
180
/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid & (L2T_SIZE - 1);

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, idx);
		return;
	}

	/* Only sync writes (marked by write_l2e) have waiters to release. */
	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &adap->l2t->l2tab[idx];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			/* Entry is usable now; stale if the neighbour is. */
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}
210
211/*
212 * Add a packet to an L2T entry's queue of packets awaiting resolution.
213 * Must be called with the entry's lock held.
214 */
215static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
216{
217 skb->next = NULL;
218 if (e->arpq_head)
219 e->arpq_tail->next = skb;
220 else
221 e->arpq_head = skb;
222 e->arpq_tail = skb;
223}
224
/*
 * Transmit an offload packet through L2T entry @e.  Valid entries go
 * straight to the hardware; stale entries are revalidated first;
 * unresolved entries queue the packet until address resolution (or a
 * pending sync write) completes.  Returns 0 or a t4_ofld_send() error.
 */
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through: stale entries may still be used for Tx */
	case L2T_STATE_VALID:     /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		/* State may have changed before we got the lock; recheck. */
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * If resolution isn't in progress kick it off; if the
		 * neighbour is already resolved, push the entry to HW.
		 */
		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);
262
/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	/* Scan from the rover to the end, then wrap to the start. */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}
299
/*
 * Called when an L2T entry has no more users.  Drops the neighbour
 * reference (unless the entry was recycled in the meantime) and returns
 * the entry to the free pool.
 */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);

	/* Recover the containing table from the entry's index. */
	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}
319
/* Drop a reference to an L2T entry, freeing it when the count hits 0. */
void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);
326
/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);                /* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	/*
	 * Pick the entry's state from the neighbour: re-resolve if the MAC
	 * changed or the neighbour is invalid, otherwise valid/stale.
	 */
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}
348
/*
 * Look up (or allocate) the L2T entry for the next hop described by
 * @neigh, egressing through @physdev.  Returns the entry with a
 * reference held, or NULL if the table is full.
 */
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	/* Loopback next hops use the port's loopback channel (chan + 4). */
	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	/* First try to find an existing entry for this next hop. */
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			/* We're the first user again; resync with neigh. */
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		/* Link the new entry at the head of its hash chain. */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);
403
404/*
405 * Called when address resolution fails for an L2T entry to handle packets
406 * on the arpq head. If a packet specifies a failure handler it is invoked,
407 * otherwise the packet is sent to the device.
408 */
409static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
410{
411 while (arpq) {
412 struct sk_buff *skb = arpq;
413 const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
414
415 arpq = skb->next;
416 skb->next = NULL;
417 if (cb->arp_err_handler)
418 cb->arp_err_handler(cb->handle, skb);
419 else
420 t4_ofld_send(adap, skb);
421 }
422}
423
/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			/* Only in-use entries need updating. */
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

 found:
	/*
	 * Drop the table lock but deliberately keep BHs disabled (plain
	 * read_unlock, not _bh): e->lock was taken as a plain spin_lock
	 * under the bh-disabled section and is released below with
	 * spin_unlock_bh, which re-enables BHs.
	 */
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			/* Resolution failed: take over the queued packets. */
			arpq = e->arpq_head;
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		/* Push the entry to HW only if the MAC actually changed. */
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(adap, arpq);
}
476
477/*
478 * Allocate an L2T entry for use by a switching rule. Such entries need to be
479 * explicitly freed and while busy they are not on any hash chain, so normal
480 * address resolution updates do not see them.
481 */
482struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
483{
484 struct l2t_entry *e;
485
486 write_lock_bh(&d->lock);
487 e = alloc_l2e(d);
488 if (e) {
489 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
490 e->state = L2T_STATE_SWITCHING;
491 atomic_set(&e->refcnt, 1);
492 spin_unlock(&e->lock);
493 }
494 write_unlock_bh(&d->lock);
495 return e;
496}
497
498/*
499 * Sets/updates the contents of a switching L2T entry that has been allocated
500 * with an earlier call to @t4_l2t_alloc_switching.
501 */
502int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
503 u8 port, u8 *eth_addr)
504{
505 e->vlan = vlan;
506 e->lport = port;
507 memcpy(e->dmac, eth_addr, ETH_ALEN);
508 return write_l2e(adap, e, 0);
509}
510
511struct l2t_data *t4_init_l2t(void)
512{
513 int i;
514 struct l2t_data *d;
515
516 d = t4_alloc_mem(sizeof(*d));
517 if (!d)
518 return NULL;
519
520 d->rover = d->l2tab;
521 atomic_set(&d->nfree, L2T_SIZE);
522 rwlock_init(&d->lock);
523
524 for (i = 0; i < L2T_SIZE; ++i) {
525 d->l2tab[i].idx = i;
526 d->l2tab[i].state = L2T_STATE_UNUSED;
527 spin_lock_init(&d->l2tab[i].lock);
528 atomic_set(&d->l2tab[i].refcnt, 0);
529 }
530 return d;
531}
532
533#include <linux/module.h>
534#include <linux/debugfs.h>
535#include <linux/seq_file.h>
536
537static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
538{
539 struct l2t_entry *l2tab = seq->private;
540
541 return pos >= L2T_SIZE ? NULL : &l2tab[pos];
542}
543
544static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
545{
546 return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
547}
548
549static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
550{
551 v = l2t_get_idx(seq, *pos);
552 if (v)
553 ++*pos;
554 return v;
555}
556
/* seq_file stop: nothing to release, the walk holds no locks or refs. */
static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}
560
561static char l2e_state(const struct l2t_entry *e)
562{
563 switch (e->state) {
564 case L2T_STATE_VALID: return 'V';
565 case L2T_STATE_STALE: return 'S';
566 case L2T_STATE_SYNC_WRITE: return 'W';
567 case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
568 case L2T_STATE_SWITCHING: return 'X';
569 default:
570 return 'U';
571 }
572}
573
/*
 * Emit one line of the debugfs L2T dump: the column header for
 * SEQ_START_TOKEN, otherwise the formatted contents of one l2t_entry,
 * sampled under the entry's lock.
 */
static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address "
			 "Ethernet address VLAN/P LP State Users Port\n");
	else {
		char ip[60];	/* textual next-hop IPv4/IPv6 address */
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		/* switching entries have no next-hop address to show */
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
			   e->idx, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}
597
/* Iterator hooks backing the seq_file-based L2T debugfs dump. */
static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};
604
605static int l2t_seq_open(struct inode *inode, struct file *file)
606{
607 int rc = seq_open(file, &l2t_seq_ops);
608
609 if (!rc) {
610 struct adapter *adap = inode->i_private;
611 struct seq_file *seq = file->private_data;
612
613 seq->private = adap->l2t->l2tab;
614 }
615 return rc;
616}
617
/* Read-only seq_file file_operations exposing the L2T table contents. */
const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 000000000000..643f27ed3cf4
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,110 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __CXGB4_L2T_H
36#define __CXGB4_L2T_H
37
38#include <linux/spinlock.h>
39#include <linux/if_ether.h>
40#include <asm/atomic.h>
41
42struct adapter;
43struct l2t_data;
44struct neighbour;
45struct net_device;
46struct file_operations;
47struct cpl_l2t_write_rpl;
48
/*
 * Each L2T entry plays multiple roles. First of all, it keeps state for the
 * corresponding entry of the HW L2 table and maintains a queue of offload
 * packets awaiting address resolution. Second, it is a node of a hash table
 * chain, where the nodes of the chain are linked together through their next
 * pointer. Finally, each node is a bucket of a hash table, pointing to the
 * first element in its chain through its first pointer.
 */
struct l2t_entry {
	u16 state;		/* entry state (L2T_STATE_*) */
	u16 idx;		/* entry index within the HW table */
	u32 addr[4];		/* next hop IP or IPv6 address */
	int ifindex;		/* neighbor's net_device's ifindex */
	struct neighbour *neigh;	/* associated neighbour */
	struct l2t_entry *first;	/* start of hash chain */
	struct l2t_entry *next;	/* next l2t_entry on chain */
	struct sk_buff *arpq_head;	/* queue of packets awaiting resolution */
	struct sk_buff *arpq_tail;	/* tail of that queue, for O(1) append */
	spinlock_t lock;	/* protects the entry's fields */
	atomic_t refcnt;	/* entry reference count */
	u16 hash;		/* hash bucket the entry is on */
	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
	u8 v6;			/* whether entry is for IPv6 */
	u8 lport;		/* associated offload logical interface */
	u8 dmac[ETH_ALEN];	/* neighbour's MAC address */
};
75
76typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
77
/*
 * Callback stored in an skb's control block to handle address resolution
 * failure: @arp_err_handler is invoked with @handle and the skb when the
 * neighbour the packet was queued behind fails to resolve.
 */
struct l2t_skb_cb {
	void *handle;			/* opaque cookie for the handler */
	arp_err_handler_t arp_err_handler;
};

/* View of the l2t_skb_cb overlaid on the skb's control buffer (cb[]) */
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
87
88static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
89 arp_err_handler_t handler)
90{
91 L2T_SKB_CB(skb)->handle = handle;
92 L2T_SKB_CB(skb)->arp_err_handler = handler;
93}
94
95void cxgb4_l2t_release(struct l2t_entry *e);
96int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
97 struct l2t_entry *e);
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev,
100 unsigned int priority);
101
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
105 u8 port, u8 *eth_addr);
106struct l2t_data *t4_init_l2t(void);
107void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
108
109extern const struct file_operations t4_l2t_fops;
110#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
new file mode 100644
index 000000000000..14adc58e71c3
--- /dev/null
+++ b/drivers/net/cxgb4/sge.c
@@ -0,0 +1,2431 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_vlan.h>
39#include <linux/ip.h>
40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h>
42#include <net/ipv6.h>
43#include <net/tcp.h>
44#include "cxgb4.h"
45#include "t4_regs.h"
46#include "t4_msg.h"
47#include "t4fw_api.h"
48
49/*
50 * Rx buffer size. We use largish buffers if possible but settle for single
51 * pages under memory shortage.
52 */
53#if PAGE_SHIFT >= 16
54# define FL_PG_ORDER 0
55#else
56# define FL_PG_ORDER (16 - PAGE_SHIFT)
57#endif
58
59/* RX_PULL_LEN should be <= RX_COPY_THRES */
60#define RX_COPY_THRES 256
61#define RX_PULL_LEN 128
62
63/*
64 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
65 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
66 */
67#define RX_PKT_SKB_LEN 512
68
69/* Ethernet header padding prepended to RX_PKTs */
70#define RX_PKT_PAD 2
71
72/*
73 * Max number of Tx descriptors we clean up at a time. Should be modest as
74 * freeing skbs isn't cheap and it happens while holding locks. We just need
75 * to free packets faster than they arrive, we eventually catch up and keep
76 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
77 */
78#define MAX_TX_RECLAIM 16
79
80/*
81 * Max number of Rx buffers we replenish at a time. Again keep this modest,
82 * allocating buffers isn't cheap either.
83 */
84#define MAX_RX_REFILL 16U
85
86/*
87 * Period of the Rx queue check timer. This timer is infrequent as it has
88 * something to do only when the system experiences severe memory shortage.
89 */
90#define RX_QCHECK_PERIOD (HZ / 2)
91
92/*
93 * Period of the Tx queue check timer.
94 */
95#define TX_QCHECK_PERIOD (HZ / 2)
96
97/*
98 * Max number of Tx descriptors to be reclaimed by the Tx timer.
99 */
100#define MAX_TIMER_TX_RECLAIM 100
101
102/*
103 * Timer index used when backing off due to memory shortage.
104 */
105#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
106
107/*
108 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
109 * attempt to refill it.
110 */
111#define FL_STARVE_THRES 4
112
113/*
114 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
115 * This is the same as calc_tx_descs() for a TSO packet with
116 * nr_frags == MAX_SKB_FRAGS.
117 */
118#define ETHTXQ_STOP_THRES \
119 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
120
121/*
122 * Suspension threshold for non-Ethernet Tx queues. We require enough room
123 * for a full sized WR.
124 */
125#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
126
127/*
128 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
129 * into a WR.
130 */
131#define MAX_IMM_TX_PKT_LEN 128
132
133/*
134 * Max size of a WR sent through a control Tx queue.
135 */
136#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
137
138enum {
139 /* packet alignment in FL buffers */
140 FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
141 /* egress status entry size */
142 STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
143};
144
struct tx_sw_desc {	/* SW state per Tx descriptor */
	struct sk_buff *skb;	/* packet to free once the descriptor is done */
	struct ulptx_sgl *sgl;	/* its SGL within the ring, used for unmapping */
};

struct rx_sw_desc {	/* SW state per Rx descriptor */
	struct page *page;	/* the posted buffer page */
	dma_addr_t dma_addr;	/* bus address; low bits carry RX_* flags */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Buffers are
 * mapped at page offset 0, leaving the low bits free for flags.
 */
enum {
	RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */
	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
};
162
163static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
164{
165 return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
166}
167
168static inline bool is_buf_mapped(const struct rx_sw_desc *d)
169{
170 return !(d->dma_addr & RX_UNMAPPED_BUF);
171}
172
173/**
174 * txq_avail - return the number of available slots in a Tx queue
175 * @q: the Tx queue
176 *
177 * Returns the number of descriptors in a Tx queue available to write new
178 * packets.
179 */
180static inline unsigned int txq_avail(const struct sge_txq *q)
181{
182 return q->size - 1 - q->in_use;
183}
184
185/**
186 * fl_cap - return the capacity of a free-buffer list
187 * @fl: the FL
188 *
189 * Returns the capacity of a free-buffer list. The capacity is less than
190 * the size because one descriptor needs to be left unpopulated, otherwise
191 * HW will think the FL is empty.
192 */
193static inline unsigned int fl_cap(const struct sge_fl *fl)
194{
195 return fl->size - 8; /* 1 descriptor = 8 buffers */
196}
197
198static inline bool fl_starving(const struct sge_fl *fl)
199{
200 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
201}
202
/*
 * DMA-map an skb's linear head and all of its page fragments for Tx.
 * On success the mappings are stored in @addr in order: addr[0] for the
 * head, addr[1..] for the fragments.  On failure everything mapped so far
 * is undone.  Returns 0 on success or -ENOMEM on a mapping error.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	/* walk back over the fragments mapped so far, then unmap the head */
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
232
233#ifdef CONFIG_NEED_DMA_MAP_STATE
/*
 * Undo the DMA mappings created by map_skb(): first the linear head, then
 * each page fragment, consuming the addresses in @addr in the same order
 * map_skb stored them.
 */
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
}
247
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.  The DMA addresses are expected to have been stashed at
 *	skb->head by the sender -- NOTE(review): confirm against the caller.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
260#endif
261
/*
 * Unmap the DMA mappings described by a hardware SGL that was written into
 * Tx queue @q.  The first address/length pair lives in the SGL header
 * (addr0/len0); the remaining entries are packed two per ulptx_sge_pair.
 * The SGL may wrap around the end of the descriptor ring (at q->stat), in
 * which case the continuation is read from the start of the ring (q->desc).
 */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	/* addr0 holds the head when there is linear data, else frag 0 */
	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
			/* whole pair is within the ring: unmap both halves */
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			/* pair starts exactly at the wrap point */
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			/* lengths before the wrap, both addresses after it */
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			/* pair is split after its first address */
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		/* odd trailing entry, possibly wrapped */
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
320
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.  Advances q->cidx
 *	past the reclaimed descriptors, wrapping at the end of the ring.
 */
static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {	/* wrap around the ring */
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
354
355/*
356 * Return the number of reclaimable descriptors in a Tx queue.
357 */
358static inline int reclaimable(const struct sge_txq *q)
359{
360 int hw_cidx = ntohs(q->stat->cidx);
361 hw_cidx -= q->cidx;
362 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
363}
364
365/**
366 * reclaim_completed_tx - reclaims completed Tx descriptors
367 * @adap: the adapter
368 * @q: the Tx queue to reclaim completed descriptors from
369 * @unmap: whether the buffers should be unmapped for DMA
370 *
371 * Reclaims Tx descriptors that the SGE has indicated it has processed,
372 * and frees the associated buffers if possible. Called with the Tx
373 * queue locked.
374 */
375static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
376 bool unmap)
377{
378 int avail = reclaimable(q);
379
380 if (avail) {
381 /*
382 * Limit the amount of clean up work we do at a time to keep
383 * the Tx lock hold time O(1).
384 */
385 if (avail > MAX_TX_RECLAIM)
386 avail = MAX_TX_RECLAIM;
387
388 free_tx_desc(adap, q, avail, unmap);
389 q->in_use -= avail;
390 }
391}
392
/*
 * Size in bytes of the buffer behind an Rx SW descriptor: a multi-page
 * buffer when RX_LARGE_BUF is set in the low address bits, otherwise a
 * single page.  When FL_PG_ORDER is 0 only single pages exist.
 */
static inline int get_buf_size(const struct rx_sw_desc *d)
{
#if FL_PG_ORDER > 0
	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
					      PAGE_SIZE;
#else
	return PAGE_SIZE;
#endif
}
402
403/**
404 * free_rx_bufs - free the Rx buffers on an SGE free list
405 * @adap: the adapter
406 * @q: the SGE free list to free buffers from
407 * @n: how many buffers to free
408 *
409 * Release the next @n buffers on an SGE free-buffer Rx queue. The
410 * buffers must be made inaccessible to HW before calling this function.
411 */
412static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
413{
414 while (n--) {
415 struct rx_sw_desc *d = &q->sdesc[q->cidx];
416
417 if (is_buf_mapped(d))
418 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
419 get_buf_size(d), PCI_DMA_FROMDEVICE);
420 put_page(d->page);
421 d->page = NULL;
422 if (++q->cidx == q->size)
423 q->cidx = 0;
424 q->avail--;
425 }
426}
427
428/**
429 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
430 * @adap: the adapter
431 * @q: the SGE free list
432 *
433 * Unmap the current buffer on an SGE free-buffer Rx queue. The
434 * buffer must be made inaccessible to HW before calling this function.
435 *
436 * This is similar to @free_rx_bufs above but does not free the buffer.
437 * Do note that the FL still loses any further access to the buffer.
438 */
439static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
440{
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
442
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(d), PCI_DMA_FROMDEVICE);
446 d->page = NULL;
447 if (++q->cidx == q->size)
448 q->cidx = 0;
449 q->avail--;
450}
451
/*
 * Tell the hardware about newly added Rx buffers, in whole descriptors
 * (1 descriptor = 8 buffers); any sub-8 remainder stays in pend_cred for a
 * later call.  The wmb() orders the descriptor writes before the doorbell.
 */
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
		q->pend_cred &= 7;
	}
}
461
462static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
463 dma_addr_t mapping)
464{
465 sd->page = pg;
466 sd->dma_addr = mapping; /* includes size low bits */
467}
468
469/**
470 * refill_fl - refill an SGE Rx buffer ring
471 * @adap: the adapter
472 * @q: the ring to refill
473 * @n: the number of new buffers to allocate
474 * @gfp: the gfp flags for the allocations
475 *
476 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
477 * allocated with the supplied gfp flags. The caller must assure that
478 * @n does not exceed the queue's capacity. If afterwards the queue is
479 * found critically low mark it as starving in the bitmap of starving FLs.
480 *
481 * Returns the number of buffers allocated.
482 */
483static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
484 gfp_t gfp)
485{
486 struct page *pg;
487 dma_addr_t mapping;
488 unsigned int cred = q->avail;
489 __be64 *d = &q->desc[q->pidx];
490 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
491
492 gfp |= __GFP_NOWARN; /* failures are expected */
493
494#if FL_PG_ORDER > 0
495 /*
496 * Prefer large buffers
497 */
498 while (n) {
499 pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
500 if (unlikely(!pg)) {
501 q->large_alloc_failed++;
502 break; /* fall back to single pages */
503 }
504
505 mapping = dma_map_page(adap->pdev_dev, pg, 0,
506 PAGE_SIZE << FL_PG_ORDER,
507 PCI_DMA_FROMDEVICE);
508 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
509 __free_pages(pg, FL_PG_ORDER);
510 goto out; /* do not try small pages for this error */
511 }
512 mapping |= RX_LARGE_BUF;
513 *d++ = cpu_to_be64(mapping);
514
515 set_rx_sw_desc(sd, pg, mapping);
516 sd++;
517
518 q->avail++;
519 if (++q->pidx == q->size) {
520 q->pidx = 0;
521 sd = q->sdesc;
522 d = q->desc;
523 }
524 n--;
525 }
526#endif
527
528 while (n--) {
529 pg = __netdev_alloc_page(adap->port[0], gfp);
530 if (unlikely(!pg)) {
531 q->alloc_failed++;
532 break;
533 }
534
535 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
536 PCI_DMA_FROMDEVICE);
537 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
538 netdev_free_page(adap->port[0], pg);
539 goto out;
540 }
541 *d++ = cpu_to_be64(mapping);
542
543 set_rx_sw_desc(sd, pg, mapping);
544 sd++;
545
546 q->avail++;
547 if (++q->pidx == q->size) {
548 q->pidx = 0;
549 sd = q->sdesc;
550 d = q->desc;
551 }
552 }
553
554out: cred = q->avail - cred;
555 q->pend_cred += cred;
556 ring_fl_db(adap, q);
557
558 if (unlikely(fl_starving(q))) {
559 smp_wmb();
560 set_bit(q->cntxt_id, adap->sge.starving_fl);
561 }
562
563 return cred;
564}
565
566static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
567{
568 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
569 GFP_ATOMIC);
570}
571
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.  Both rings are zero-initialized; on failure of
 *	either allocation, everything is cleaned up and NULL is returned.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			/* roll back the HW ring; caller sees a clean failure */
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}
613
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Returns the number of flits (8-byte units) needed for a
 *	scatter/gather list holding @n entries: 2 flits for the SGL header
 *	(which also carries the first address/length pair) plus
 *	ceil(3 * (n - 1) / 2) for the remaining entries, packed two per
 *	3-flit ulptx_sge_pair.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	unsigned int rest = n - 1;

	return 2 + (3 * rest) / 2 + (rest & 1);
}
626
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits; each descriptor holds 8 flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	/* a single work request may not exceed the HW maximum */
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
639
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit inside a
 *	Tx WR as immediate data, i.e. within MAX_IMM_TX_PKT_LEN minus the
 *	CPL header, so no SGL or DMA mapping of the payload is needed.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
}
651
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* small packet: inlined data plus the CPL header, no SGL */
	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);

	/* SGL covering head + fragments, plus 4 flits of WR/CPL headers */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
	if (skb_shinfo(skb)->gso_size)
		flits += 2;	/* extra header for a TSO request --
				 * presumably the LSO CPL; confirm in t4_msg.h */
	return flits;
}
671
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.  Simply converts
 *	the flit count from calc_tx_flits() into descriptors.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}
683
/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	/* the header's addr0/len0 holds the head remainder when there is
	 * one, otherwise the first fragment takes its place */
	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(si->frags[0].size);
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	/* skip frag 0 in the loop if it was already consumed by addr0 */
	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(si->frags[i].size);
		to->len[1] = cpu_to_be32(si->frags[++i].size);
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		/* odd trailing fragment: second half of the pair is zeroed */
		to->len[0] = cpu_to_be32(si->frags[i].size);
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		/* SGL wrapped: copy the staged copy into the ring in two
		 * pieces, split at the end-of-ring status area */
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)	/* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}
754
/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue, advancing its producer index by @n.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	wmb();            /* write descriptors before telling HW */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
		     QID(q->cntxt_id) | PIDX(n));
}
769
/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
			  void *pos)
{
	u64 *p;
	int left = (void *)q->stat - pos;	/* bytes until end of ring */

	if (likely(skb->len <= left)) {
		/* fits without wrapping; fast path for pure linear skbs */
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		/* wraps: copy the tail of the packet to the ring's start */
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
804
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.  Returns TXPKT_L4CSUM_DIS for protocols HW cannot checksum.
 */
static u64 hwcsum(const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	/* NOTE(review): relies on the TX_CSUM_* enum placing the TCP/UDP
	 * over IP types at or above TX_CSUM_TCPIP — confirm against t4_msg.h */
	if (likely(csum_type >= TX_CSUM_TCPIP))
		return TXPKT_CSUM_TYPE(csum_type) |
			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
	else {
		/* generic checksum: give HW explicit start/insert offsets */
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
			TXPKT_CSUM_LOC(start + skb->csum_offset);
	}
}
851
852static void eth_txq_stop(struct sge_eth_txq *q)
853{
854 netif_tx_stop_queue(q->txq);
855 q->q.stops++;
856}
857
858static inline void txq_advance(struct sge_txq *q, unsigned int n)
859{
860 q->in_use += n;
861 q->pidx += n;
862 if (q->pidx >= q->size)
863 q->pidx -= q->size;
864}
865
/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* lazily reclaim descriptors HW has finished with */
	reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* should not happen: the stack stops us before we fill up */
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* running low: stop the queue and ask HW to wake us when
		 * this WR completes */
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		/* TSO packet: prepend an LSO control message */
		struct cpl_tx_pkt_lso *lso = (void *)wr;
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(sizeof(*lso)));
		lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
				      LSO_FIRST_SLICE | LSO_LAST_SLICE |
				      LSO_IPV6(v6) |
				      LSO_ETHHDR_LEN(eth_xtra_len / 4) |
				      LSO_IPHDR_LEN(l3hdr_len / 4) |
				      LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
		lso->ipid_ofst = htons(0);
		lso->mss = htons(ssi->gso_size);
		lso->seqno_offset = htonl(0);
		lso->len = htonl(skb->len);
		cpl = (void *)(lso + 1);
		/* TSO implies HW checksum offload */
		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			TXPKT_IPHDR_LEN(l3hdr_len) |
			TXPKT_ETHHDR_LEN(eth_xtra_len);
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
			q->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
	}

	if (vlan_tx_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
	}

	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (is_eth_imm(skb)) {
		/* packet data is copied into the ring; skb can go now */
		inline_tx_skb(skb, &q->q, cpl + 1);
		dev_kfree_skb(skb);
	} else {
		int last_desc;

		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
			  addr);
		skb_orphan(skb);

		/* remember the skb/SGL on the last descriptor so the
		 * reclaim path can unmap and free it later */
		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	}

	txq_advance(&q->q, ndesc);

	ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
1004
1005/**
1006 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1007 * @q: the SGE control Tx queue
1008 *
1009 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1010 * that send only immediate data (presently just the control queues) and
1011 * thus do not have any sk_buffs to release.
1012 */
1013static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1014{
1015 int hw_cidx = ntohs(q->stat->cidx);
1016 int reclaim = hw_cidx - q->cidx;
1017
1018 if (reclaim < 0)
1019 reclaim += q->size;
1020
1021 q->in_use -= reclaim;
1022 q->cidx = hw_cidx;
1023}
1024
1025/**
1026 * is_imm - check whether a packet can be sent as immediate data
1027 * @skb: the packet
1028 *
1029 * Returns true if a packet can be sent as a WR with immediate data.
1030 */
1031static inline int is_imm(const struct sk_buff *skb)
1032{
1033 return skb->len <= MAX_CTRL_WR_LEN;
1034}
1035
/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we are out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		/* still full: ask HW to notify us when this WR completes */
		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
		q->q.stops++;
		q->full = 1;
	}
}
1055
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		/* callers must never send packets too big to inline */
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		/* queue suspended: defer the packet until restart_ctrlq() */
		skb->priority = ndesc;                 /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	/* the data was copied into the ring above; skb is no longer needed */
	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}
1098
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);	/* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;	/* previously saved */

		/*
		 * Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		spin_unlock(&q->sendq.lock);

		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		written += ndesc;
		txq_advance(&q->q, ndesc);
		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {	/* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			/* ring the doorbell periodically to bound latency */
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb: if (written)
		ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}
1151
1152/**
1153 * t4_mgmt_tx - send a management message
1154 * @adap: the adapter
1155 * @skb: the packet containing the management message
1156 *
1157 * Send a management message through control queue 0.
1158 */
1159int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1160{
1161 int ret;
1162
1163 local_bh_disable();
1164 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1165 local_bh_enable();
1166 return ret;
1167}
1168
1169/**
1170 * is_ofld_imm - check whether a packet can be sent as immediate data
1171 * @skb: the packet
1172 *
1173 * Returns true if a packet can be sent as an offload WR with immediate
1174 * data. We currently use the same limit as for Ethernet packets.
1175 */
1176static inline int is_ofld_imm(const struct sk_buff *skb)
1177{
1178 return skb->len <= MAX_IMM_TX_PKT_LEN;
1179}
1180
/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		/* whole packet is inlined: round its length up to flits */
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	/* any payload left in the linear buffer adds one more SGL entry */
	if (skb->tail != skb->transport_header)
		cnt++;
	return flits + sgl_len(cnt);
}
1202
/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
static void txq_stop_maperr(struct sge_ofld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	/* flag this queue so the restart timer knows to retry it */
	set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
}
1218
/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
{
	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;

	/* have HW generate an egress-queue-update when this WR completes */
	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
	q->q.stops++;
	q->full = 1;
}
1235
/**
 *	service_ofldq - restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its packet queue
 *	to the HW Tx ring.  The function starts and ends with the queue locked.
 */
static void service_ofldq(struct sge_ofld_txq *q)
{
	u64 *pos;
	int credits;
	struct sk_buff *skb;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/*
		 * We drop the lock but leave skb on sendq, thus retaining
		 * exclusive access to the state of the queue.
		 */
		spin_unlock(&q->sendq.lock);

		reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, skb);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			inline_tx_skb(skb, &q->q, pos);
		else if (map_skb(q->adap->pdev_dev, skb,
				 (dma_addr_t *)skb->head)) {
			/* mapping failed; retry this skb later via the timer */
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			/* headers are copied inline, the payload goes by SGL;
			 * NOTE(review): the DMA address array is stashed in
			 * the skb's head area — assumes sufficient headroom,
			 * confirm against the offload senders */
			memcpy(pos, skb->data, hdr_len);
			write_sgl(skb, &q->q, (void *)pos + hdr_len,
				  pos + flits, hdr_len,
				  (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			/* record the skb on the last descriptor for reclaim */
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			/* ring the doorbell periodically to bound latency */
			ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);	/* inlined copies need no skb */
	}
	if (likely(written))
		ring_tx_db(q->adap, &q->q, written);
}
1307
1308/**
1309 * ofld_xmit - send a packet through an offload queue
1310 * @q: the Tx offload queue
1311 * @skb: the packet
1312 *
1313 * Send an offload packet through an SGE offload queue.
1314 */
1315static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1316{
1317 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1318 spin_lock(&q->sendq.lock);
1319 __skb_queue_tail(&q->sendq, skb);
1320 if (q->sendq.qlen == 1)
1321 service_ofldq(q);
1322 spin_unlock(&q->sendq.lock);
1323 return NET_XMIT_SUCCESS;
1324}
1325
1326/**
1327 * restart_ofldq - restart a suspended offload queue
1328 * @data: the offload queue to restart
1329 *
1330 * Resumes transmission on a suspended Tx offload queue.
1331 */
1332static void restart_ofldq(unsigned long data)
1333{
1334 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1335
1336 spin_lock(&q->sendq.lock);
1337 q->full = 0; /* the queue actually is completely empty now */
1338 service_ofldq(q);
1339 spin_unlock(&q->sendq.lock);
1340}
1341
1342/**
1343 * skb_txq - return the Tx queue an offload packet should use
1344 * @skb: the packet
1345 *
1346 * Returns the Tx queue an offload packet should use as indicated by bits
1347 * 1-15 in the packet's queue_mapping.
1348 */
1349static inline unsigned int skb_txq(const struct sk_buff *skb)
1350{
1351 return skb->queue_mapping >> 1;
1352}
1353
1354/**
1355 * is_ctrl_pkt - return whether an offload packet is a control packet
1356 * @skb: the packet
1357 *
1358 * Returns whether an offload packet should use an OFLD or a CTRL
1359 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1360 */
1361static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1362{
1363 return skb->queue_mapping & 1;
1364}
1365
1366static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1367{
1368 unsigned int idx = skb_txq(skb);
1369
1370 if (unlikely(is_ctrl_pkt(skb)))
1371 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1372 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1373}
1374
/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int rc;

	/* queue manipulation happens under BH protection */
	local_bh_disable();
	rc = ofld_send(adap, skb);
	local_bh_enable();
	return rc;
}
1393
/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	/* netdev2adap() maps the net device to its owning adapter */
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
1407
1408static inline void copy_frags(struct skb_shared_info *ssi,
1409 const struct pkt_gl *gl, unsigned int offset)
1410{
1411 unsigned int n;
1412
1413 /* usually there's just one frag */
1414 ssi->frags[0].page = gl->frags[0].page;
1415 ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
1416 ssi->frags[0].size = gl->frags[0].size - offset;
1417 ssi->nr_frags = gl->nfrags;
1418 n = gl->nfrags - 1;
1419 if (n)
1420 memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
1421
1422 /* get a reference to the last page, we don't own it */
1423 get_page(gl->frags[n].page);
1424}
1425
/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		/* small packet: copy it entirely into the linear area */
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		/* larger packet: pull headers, attach the rest as frags */
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb_shinfo(skb), gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
1466
1467/**
1468 * t4_pktgl_free - free a packet gather list
1469 * @gl: the gather list
1470 *
1471 * Releases the pages of a packet gather list. We do not own the last
1472 * page on the list and do not free it.
1473 */
1474void t4_pktgl_free(const struct pkt_gl *gl)
1475{
1476 int n;
1477 const skb_frag_t *p;
1478
1479 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1480 put_page(p->page);
1481}
1482
1483/*
1484 * Process an MPS trace packet. Give it an unused protocol number so it won't
1485 * be delivered to anyone and send it to the stack for capture.
1486 */
1487static noinline int handle_trace_pkt(struct adapter *adap,
1488 const struct pkt_gl *gl)
1489{
1490 struct sk_buff *skb;
1491 struct cpl_trace_pkt *p;
1492
1493 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1494 if (unlikely(!skb)) {
1495 t4_pktgl_free(gl);
1496 return 0;
1497 }
1498
1499 p = (struct cpl_trace_pkt *)skb->data;
1500 __skb_pull(skb, sizeof(*p));
1501 skb_reset_mac_header(skb);
1502 skb->protocol = htons(0xffff);
1503 skb->dev = adap->port[0];
1504 netif_receive_skb(skb);
1505 return 0;
1506}
1507
/*
 * Hand an Rx packet to the GRO machinery as page fragments, handling
 * VLAN extraction and updating the queue's LRO/GRO statistics.
 */
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	/* attach the gather list pages, skipping the HW padding bytes */
	copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
	skb->len = gl->tot_len - RX_PKT_PAD;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);

	if (unlikely(pkt->vlan_ex)) {
		struct port_info *pi = netdev_priv(rxq->rspq.netdev);
		struct vlan_group *grp = pi->vlan_grp;

		rxq->stats.vlan_ex++;
		if (likely(grp)) {
			ret = vlan_gro_frags(&rxq->rspq.napi, grp,
					     ntohs(pkt->vlan));
			goto stats;
		}
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
stats:	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}
1547
/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	struct port_info *pi;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	/* MPS trace packets are diverted to the capture path */
	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
		return handle_trace_pkt(q->adap, si);

	pkt = (void *)&rsp[1];
	csum_ok = pkt->csum_calc && !pkt->err_vec;
	/* unfragmented TCP with a good checksum can go through GRO */
	if ((pkt->l2info & htonl(RXF_TCP)) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}

	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	pi = netdev_priv(skb->dev);
	rxq->stats.pkts++;

	if (csum_ok && (pi->rx_offload & RX_CSO) &&
	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
		if (!pkt->ip_frag)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else {
			/* fragment: pass HW's partial csum to the stack */
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		rxq->stats.rx_cso++;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(pkt->vlan_ex)) {
		struct vlan_group *grp = pi->vlan_grp;

		rxq->stats.vlan_ex++;
		if (likely(grp))
			vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
		else
			/* VLAN tagged but no VLAN group registered: drop */
			dev_kfree_skb_any(skb);
	} else
		netif_receive_skb(skb);

	return 0;
}
1615
/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		/* step the FL consumer index back one slot, wrapping */
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		/* already unmapped: flag it so reclaim won't unmap again */
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}
1647
1648/**
1649 * is_new_response - check if a response is newly written
1650 * @r: the response descriptor
1651 * @q: the response queue
1652 *
1653 * Returns true if a response descriptor contains a yet unprocessed
1654 * response.
1655 */
1656static inline bool is_new_response(const struct rsp_ctrl *r,
1657 const struct sge_rspq *q)
1658{
1659 return RSPD_GEN(r->type_gen) == q->gen;
1660}
1661
1662/**
1663 * rspq_next - advance to the next entry in a response queue
1664 * @q: the queue
1665 *
1666 * Updates the state of a response queue to advance it to the next entry.
1667 */
1668static inline void rspq_next(struct sge_rspq *q)
1669{
1670 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1671 if (unlikely(++q->cidx == q->size)) {
1672 q->cidx = 0;
1673 q->gen ^= 1;
1674 q->cur_desc = q->desc;
1675 }
1676}
1677
/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		/* the rsp_ctrl sits at the tail of each IQ entry */
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();	/* see the generation bit before the payload */
		rsp_type = RSPD_TYPE(rc->type_gen);
		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
			skb_frag_t *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF) {
				/* HW moved to a new FL buffer: release the
				 * previous one we were consuming from */
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len &= RSPD_LEN;
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(rsd);
				fp->page = rsd->page;
				fp->page_offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].page_offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				/* keep consuming from the same FL buffer */
				q->offset += ALIGN(fp->size, FL_ALIGN);
			else
				/* handler deferred the packet: give the FL
				 * buffers back so we can retry later */
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	/* top up the free list if it has drained enough */
	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}
1771
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	in not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(q, budget);

	if (likely(work_done < budget)) {
		/* done: re-enable interrupts with the chosen holdoff */
		napi_complete(napi);
		params = q->next_intr_params;
		q->next_intr_params = q->intr_params;
	} else
		/* budget exhausted; presumably index 7 selects the longest
		 * holdoff timer — TODO confirm against the SGE timer setup */
		params = QINTR_TIMER_IDX(7);

	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
	return work_done;
}
1800
1801/*
1802 * The MSI-X interrupt handler for an SGE response queue.
1803 */
1804irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
1805{
1806 struct sge_rspq *q = cookie;
1807
1808 napi_schedule(&q->napi);
1809 return IRQ_HANDLED;
1810}
1811
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.  Returns the number of
 * interrupt-queue entries consumed.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		/* the rsp_ctrl sits at the tail of each IQ entry */
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		rmb();	/* see the generation bit before the payload */
		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			/* schedule NAPI for the queue that raised this */
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	/* acknowledge the consumed entries and rearm the interrupt queue */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
1843
1844/*
1845 * The MSI interrupt handler, which handles data events from SGE response queues
1846 * as well as error and other async events as they all use the same MSI vector.
1847 */
1848static irqreturn_t t4_intr_msi(int irq, void *cookie)
1849{
1850 struct adapter *adap = cookie;
1851
1852 t4_slow_intr_handler(adap);
1853 process_intrq(adap);
1854 return IRQ_HANDLED;
1855}
1856
1857/*
1858 * Interrupt handler for legacy INTx interrupts.
1859 * Handles data events from SGE response queues as well as error and other
1860 * async events as they all use the same interrupt line.
1861 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        /* de-assert the INTx line before checking for work */
        t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
        /*
         * Bitwise | (not ||) is deliberate: both handlers must always run;
         * short-circuiting would skip the interrupt queue when the slow
         * handler found work.
         */
        if (t4_slow_intr_handler(adap) | process_intrq(adap))
                return IRQ_HANDLED;
        return IRQ_NONE;             /* probably shared interrupt */
}
1871
1872/**
1873 * t4_intr_handler - select the top-level interrupt handler
1874 * @adap: the adapter
1875 *
1876 * Selects the top-level interrupt handler based on the type of interrupts
1877 * (MSI-X, MSI, or INTx).
1878 */
1879irq_handler_t t4_intr_handler(struct adapter *adap)
1880{
1881 if (adap->flags & USING_MSIX)
1882 return t4_sge_intr_msix;
1883 if (adap->flags & USING_MSI)
1884 return t4_intr_msi;
1885 return t4_intr_intx;
1886}
1887
/*
 * Periodic Rx timer: replenish starving free lists and watch for SGE
 * ingress DMA engines that appear stalled.
 */
static void sge_rx_timer_cb(unsigned long data)
{
        unsigned long m;
        unsigned int i, cnt[2];
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;

        /*
         * Walk the starving-FL bitmap and try to restart NAPI on each
         * queue so its list gets refilled; if NAPI can't be rescheduled
         * the bit is set again for the next timer pass.
         */
        for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
                for (m = s->starving_fl[i]; m; m &= m - 1) {
                        struct sge_eth_rxq *rxq;
                        unsigned int id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_fl *fl = s->egr_map[id];

                        clear_bit(id, s->starving_fl);
                        smp_mb__after_clear_bit();

                        if (fl_starving(fl)) {
                                rxq = container_of(fl, struct sge_eth_rxq, fl);
                                if (napi_reschedule(&rxq->rspq.napi))
                                        fl->starving++;
                                else
                                        set_bit(id, s->starving_fl);
                        }
                }

        /* NOTE(review): debug index 13 appears to expose per-engine idma
         * activity counters — confirm against the SGE hardware docs */
        t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
        cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
        cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

        for (i = 0; i < 2; i++)
                if (cnt[i] >= s->starve_thres) {
                        /* skip if already reported, or if the counter reads
                         * all-ones (presumably an idle/invalid engine) */
                        if (s->idma_state[i] || cnt[i] == 0xffffffff)
                                continue;
                        s->idma_state[i] = 1;
                        t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
                        m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
                        dev_warn(adap->pdev_dev,
                                 "SGE idma%u starvation detected for "
                                 "queue %lu\n", i, m & 0xffff);
                } else if (s->idma_state[i])
                        s->idma_state[i] = 0;   /* engine progressed again */

        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
1932
/*
 * Periodic Tx timer: resume offload queues stopped by mapping errors and
 * reclaim completed Ethernet Tx descriptors in the background.
 */
static void sge_tx_timer_cb(unsigned long data)
{
        unsigned long m;
        unsigned int i, budget;
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;

        /* restart offload Tx queues that hit DMA mapping errors */
        for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
                for (m = s->txq_maperr[i]; m; m &= m - 1) {
                        unsigned long id = __ffs(m) + i * BITS_PER_LONG;
                        struct sge_ofld_txq *txq = s->egr_map[id];

                        clear_bit(id, s->txq_maperr);
                        tasklet_schedule(&txq->qresume_tsk);
                }

        /*
         * Round-robin over the Ethernet Tx queues, starting where the
         * previous run stopped, reclaiming completed descriptors under a
         * global per-run budget.
         */
        budget = MAX_TIMER_TX_RECLAIM;
        i = s->ethtxq_rover;
        do {
                struct sge_eth_txq *q = &s->ethtxq[i];

                /* only touch queues quiet for at least ~10ms and whose
                 * xmit lock we can take without contention */
                if (q->q.in_use &&
                    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
                    __netif_tx_trylock(q->txq)) {
                        int avail = reclaimable(&q->q);

                        if (avail) {
                                if (avail > budget)
                                        avail = budget;

                                free_tx_desc(adap, &q->q, avail, true);
                                q->q.in_use -= avail;
                                budget -= avail;
                        }
                        __netif_tx_unlock(q->txq);
                }

                if (++i >= s->ethqsets)
                        i = 0;
        } while (budget && i != s->ethtxq_rover);
        s->ethtxq_rover = i;
        /* rerun almost immediately if we exhausted the budget */
        mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
1976
1977int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1978 struct net_device *dev, int intr_idx,
1979 struct sge_fl *fl, rspq_handler_t hnd)
1980{
1981 int ret, flsz = 0;
1982 struct fw_iq_cmd c;
1983 struct port_info *pi = netdev_priv(dev);
1984
1985 /* Size needs to be multiple of 16, including status entry. */
1986 iq->size = roundup(iq->size, 16);
1987
1988 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1989 &iq->phys_addr, NULL, 0);
1990 if (!iq->desc)
1991 return -ENOMEM;
1992
1993 memset(&c, 0, sizeof(c));
1994 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
1995 FW_CMD_WRITE | FW_CMD_EXEC |
1996 FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
1997 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
1998 FW_LEN16(c));
1999 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2000 FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
2001 FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
2002 FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
2003 -intr_idx - 1));
2004 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
2005 FW_IQ_CMD_IQGTSMODE |
2006 FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
2007 FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
2008 c.iqsize = htons(iq->size);
2009 c.iqaddr = cpu_to_be64(iq->phys_addr);
2010
2011 if (fl) {
2012 fl->size = roundup(fl->size, 8);
2013 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2014 sizeof(struct rx_sw_desc), &fl->addr,
2015 &fl->sdesc, STAT_LEN);
2016 if (!fl->desc)
2017 goto fl_nomem;
2018
2019 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2020 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2021 FW_IQ_CMD_FL0PADEN);
2022 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2023 FW_IQ_CMD_FL0FBMAX(3));
2024 c.fl0size = htons(flsz);
2025 c.fl0addr = cpu_to_be64(fl->addr);
2026 }
2027
2028 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2029 if (ret)
2030 goto err;
2031
2032 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2033 iq->cur_desc = iq->desc;
2034 iq->cidx = 0;
2035 iq->gen = 1;
2036 iq->next_intr_params = iq->intr_params;
2037 iq->cntxt_id = ntohs(c.iqid);
2038 iq->abs_id = ntohs(c.physiqid);
2039 iq->size--; /* subtract status entry */
2040 iq->adap = adap;
2041 iq->netdev = dev;
2042 iq->handler = hnd;
2043
2044 /* set offset to -1 to distinguish ingress queues without FL */
2045 iq->offset = fl ? 0 : -1;
2046
2047 adap->sge.ingr_map[iq->cntxt_id] = iq;
2048
2049 if (fl) {
2050 fl->cntxt_id = htons(c.fl0id);
2051 fl->avail = fl->pend_cred = 0;
2052 fl->pidx = fl->cidx = 0;
2053 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2054 adap->sge.egr_map[fl->cntxt_id] = fl;
2055 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2056 }
2057 return 0;
2058
2059fl_nomem:
2060 ret = -ENOMEM;
2061err:
2062 if (iq->desc) {
2063 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2064 iq->desc, iq->phys_addr);
2065 iq->desc = NULL;
2066 }
2067 if (fl && fl->desc) {
2068 kfree(fl->sdesc);
2069 fl->sdesc = NULL;
2070 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2071 fl->desc, fl->addr);
2072 fl->desc = NULL;
2073 }
2074 return ret;
2075}
2076
2077static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2078{
2079 q->in_use = 0;
2080 q->cidx = q->pidx = 0;
2081 q->stops = q->restarts = 0;
2082 q->stat = (void *)&q->desc[q->size];
2083 q->cntxt_id = id;
2084 adap->sge.egr_map[id] = q;
2085}
2086
/*
 * Allocate an Ethernet Tx queue: DMA rings plus a firmware FW_EQ_ETH_CMD.
 * On FW failure the rings are freed again and the FW error is returned.
 */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid)
{
        int ret, nentries;
        struct fw_eq_eth_cmd c;
        struct port_info *pi = netdev_priv(dev);

        /* Add status entries */
        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
                        &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
        if (!txq->q.desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
                            FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
                                 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
        c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
        c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
                                   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
                                   FW_EQ_ETH_CMD_IQID(iqid));
        c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
                                  FW_EQ_ETH_CMD_FBMAX(3) |
                                  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
                                  FW_EQ_ETH_CMD_EQSIZE(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
        if (ret) {
                /* FW refused the queue: release everything we allocated */
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
                                  txq->q.desc, txq->q.phys_addr);
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
        txq->txq = netdevq;
        txq->tso = txq->tx_cso = txq->vlan_ins = 0;
        txq->mapping_err = 0;
        return 0;
}
2137
2138int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2139 struct net_device *dev, unsigned int iqid,
2140 unsigned int cmplqid)
2141{
2142 int ret, nentries;
2143 struct fw_eq_ctrl_cmd c;
2144 struct port_info *pi = netdev_priv(dev);
2145
2146 /* Add status entries */
2147 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2148
2149 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2150 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2151 NULL, 0);
2152 if (!txq->q.desc)
2153 return -ENOMEM;
2154
2155 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2156 FW_CMD_WRITE | FW_CMD_EXEC |
2157 FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
2158 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
2159 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
2160 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
2161 c.physeqid_pkd = htonl(0);
2162 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2163 FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2164 FW_EQ_CTRL_CMD_IQID(iqid));
2165 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2166 FW_EQ_CTRL_CMD_FBMAX(3) |
2167 FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
2168 FW_EQ_CTRL_CMD_EQSIZE(nentries));
2169 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2170
2171 ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
2172 if (ret) {
2173 dma_free_coherent(adap->pdev_dev,
2174 nentries * sizeof(struct tx_desc),
2175 txq->q.desc, txq->q.phys_addr);
2176 txq->q.desc = NULL;
2177 return ret;
2178 }
2179
2180 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
2181 txq->adap = adap;
2182 skb_queue_head_init(&txq->sendq);
2183 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2184 txq->full = 0;
2185 return 0;
2186}
2187
/*
 * Allocate an offload Tx queue: DMA rings plus a firmware FW_EQ_OFLD_CMD.
 * Mirrors t4_sge_alloc_eth_txq() but creates an offload-class EQ.
 */
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                          struct net_device *dev, unsigned int iqid)
{
        int ret, nentries;
        struct fw_eq_ofld_cmd c;
        struct port_info *pi = netdev_priv(dev);

        /* Add status entries */
        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);

        txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
                        sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
                        &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
        if (!txq->q.desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
                            FW_CMD_WRITE | FW_CMD_EXEC |
                            FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
                                 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
        c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
                                   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
                                   FW_EQ_OFLD_CMD_IQID(iqid));
        c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
                                  FW_EQ_OFLD_CMD_FBMAX(3) |
                                  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
                                  FW_EQ_OFLD_CMD_EQSIZE(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
        if (ret) {
                /* FW refused the queue: release everything we allocated */
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
                                  txq->q.desc, txq->q.phys_addr);
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
        txq->adap = adap;
        skb_queue_head_init(&txq->sendq);
        tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
        txq->full = 0;
        txq->mapping_err = 0;
        return 0;
}
2238
2239static void free_txq(struct adapter *adap, struct sge_txq *q)
2240{
2241 dma_free_coherent(adap->pdev_dev,
2242 q->size * sizeof(struct tx_desc) + STAT_LEN,
2243 q->desc, q->phys_addr);
2244 q->cntxt_id = 0;
2245 q->sdesc = NULL;
2246 q->desc = NULL;
2247}
2248
/*
 * Free an ingress queue and its optional free list: destroy the FW
 * context, release the DMA rings, and clear the software state.
 */
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                         struct sge_fl *fl)
{
        /* 0xffff tells the FW there is no FL attached */
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

        adap->sge.ingr_map[rq->cntxt_id] = NULL;
        t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
                   0xffff);
        /* +1 restores the status entry subtracted at allocation time */
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
        netif_napi_del(&rq->napi);
        rq->netdev = NULL;
        rq->cntxt_id = rq->abs_id = 0;
        rq->desc = NULL;

        if (fl) {
                /* return outstanding Rx buffers before freeing the ring */
                free_rx_bufs(adap, fl, fl->avail);
                dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
                                  fl->desc, fl->addr);
                kfree(fl->sdesc);
                fl->sdesc = NULL;
                fl->cntxt_id = 0;
                fl->desc = NULL;
        }
}
2274
2275/**
2276 * t4_free_sge_resources - free SGE resources
2277 * @adap: the adapter
2278 *
2279 * Frees resources used by the SGE queue sets.
2280 */
void t4_free_sge_resources(struct adapter *adap)
{
        int i;
        struct sge_eth_rxq *eq = adap->sge.ethrxq;
        struct sge_eth_txq *etq = adap->sge.ethtxq;
        struct sge_ofld_rxq *oq = adap->sge.ofldrxq;

        /* clean up Ethernet Tx/Rx queues */
        for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq, &eq->fl);
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
                        free_tx_desc(adap, &etq->q, etq->q.in_use, true);
                        kfree(etq->q.sdesc);
                        free_txq(adap, &etq->q);
                }
        }

        /* clean up RDMA and iSCSI Rx queues */
        for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
                if (oq->rspq.desc)
                        free_rspq_fl(adap, &oq->rspq, &oq->fl);
        }
        for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
                if (oq->rspq.desc)
                        free_rspq_fl(adap, &oq->rspq, &oq->fl);
        }

        /* clean up offload Tx queues */
        for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
                struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

                if (q->q.desc) {
                        /* stop the restart tasklet before tearing down */
                        tasklet_kill(&q->qresume_tsk);
                        t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
                        free_tx_desc(adap, &q->q, q->q.in_use, false);
                        kfree(q->q.sdesc);
                        __skb_queue_purge(&q->sendq);
                        free_txq(adap, &q->q);
                }
        }

        /* clean up control Tx queues */
        for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
                struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

                if (cq->q.desc) {
                        tasklet_kill(&cq->qresume_tsk);
                        t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
                        __skb_queue_purge(&cq->sendq);
                        free_txq(adap, &cq->q);
                }
        }

        /* the FW event queue and interrupt queue have no free lists */
        if (adap->sge.fw_evtq.desc)
                free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

        if (adap->sge.intrq.desc)
                free_rspq_fl(adap, &adap->sge.intrq, NULL);

        /* clear the reverse egress queue map */
        memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
}
2345
2346void t4_sge_start(struct adapter *adap)
2347{
2348 adap->sge.ethtxq_rover = 0;
2349 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2350 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2351}
2352
2353/**
2354 * t4_sge_stop - disable SGE operation
2355 * @adap: the adapter
2356 *
2357 * Stop tasklets and timers associated with the DMA engine. Note that
2358 * this is effective only if measures have been taken to disable any HW
2359 * events that may restart them.
2360 */
void t4_sge_stop(struct adapter *adap)
{
        int i;
        struct sge *s = &adap->sge;

        if (in_interrupt())  /* actions below require waiting */
                return;

        /* only kill timers that were actually set up */
        if (s->rx_timer.function)
                del_timer_sync(&s->rx_timer);
        if (s->tx_timer.function)
                del_timer_sync(&s->tx_timer);

        /* stop the queue-restart tasklets of all offload Tx queues */
        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
                struct sge_ofld_txq *q = &s->ofldtxq[i];

                if (q->q.desc)
                        tasklet_kill(&q->qresume_tsk);
        }
        /* ... and of all control Tx queues */
        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
                struct sge_ctrl_txq *cq = &s->ctrlq[i];

                if (cq->q.desc)
                        tasklet_kill(&cq->qresume_tsk);
        }
}
2387
2388/**
2389 * t4_sge_init - initialize SGE
2390 * @adap: the adapter
2391 *
2392 * Performs SGE initialization needed every time after a chip reset.
2393 * We do not initialize any of the queues here, instead the driver
2394 * top-level must request them individually.
2395 */
void t4_sge_init(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        unsigned int fl_align_log = ilog2(FL_ALIGN);

        /* global SGE control: packet shift, FL padding boundary, and the
         * status-page size matching STAT_LEN */
        t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
                         INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
                         INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
                         RXPKTCPLMODE |
                         (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
        /* host page size for this PF (encoded as log2 - 10) */
        t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
                         HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
        t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
#if FL_PG_ORDER > 0
        /* second FL buffer size for large (multi-page) buffers */
        t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
#endif
        /* interrupt packet-count thresholds */
        t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
                     THRESHOLD_0(s->counter_val[0]) |
                     THRESHOLD_1(s->counter_val[1]) |
                     THRESHOLD_2(s->counter_val[2]) |
                     THRESHOLD_3(s->counter_val[3]));
        /* holdoff timers, converted from microseconds to core clock ticks */
        t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
                     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
                     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
        t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
                     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
                     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
        t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
                     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
                     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
        s->starve_thres = core_ticks_per_usec(adap) * 1000000;   /* 1 s */
        s->idma_state[0] = s->idma_state[1] = 0;
        spin_lock_init(&s->intrq_lock);
}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
new file mode 100644
index 000000000000..a814a3afe123
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -0,0 +1,3131 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
56int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
/* Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the final register value. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
                                  int polarity, int attempts, int delay)
{
        return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
                                   delay, NULL);
}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals, unsigned int nregs,
114 unsigned int start_idx)
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
145/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
147 */
148static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
149 u32 mbox_addr)
150{
151 for ( ; nflit; nflit--, mbox_addr += 8)
152 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
153}
154
155/*
156 * Handle a FW assertion reported in a mailbox.
157 */
/*
 * Handle a FW assertion reported in a mailbox: pull the FW_DEBUG_CMD out
 * of the mailbox and log its file/line and the two assertion values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
        struct fw_debug_cmd asrt;

        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
        dev_alert(adap->pdev_dev,
                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
                  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
                  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
168
/* Log the full 64-byte contents of a mailbox (eight 64-bit flits) to aid
 * diagnosing failed or timed-out FW commands. */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
        dev_err(adap->pdev_dev,
                "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
                (unsigned long long)t4_read_reg64(adap, data_reg),
                (unsigned long long)t4_read_reg64(adap, data_reg + 8),
                (unsigned long long)t4_read_reg64(adap, data_reg + 16),
                (unsigned long long)t4_read_reg64(adap, data_reg + 24),
                (unsigned long long)t4_read_reg64(adap, data_reg + 32),
                (unsigned long long)t4_read_reg64(adap, data_reg + 40),
                (unsigned long long)t4_read_reg64(adap, data_reg + 48),
                (unsigned long long)t4_read_reg64(adap, data_reg + 56));
}
182
183/**
184 * t4_wr_mbox_meat - send a command to FW through the given mailbox
185 * @adap: the adapter
186 * @mbox: index of the mailbox to use
187 * @cmd: the command to write
188 * @size: command length in bytes
189 * @rpl: where to optionally store the reply
190 * @sleep_ok: if true we may sleep while awaiting command completion
191 *
192 * Sends the given command to FW through the selected mailbox and waits
193 * for the FW to execute the command. If @rpl is not %NULL it is used to
194 * store the FW's reply to the command. The command and its optional
195 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
196 * to respond. @sleep_ok determines whether we may sleep while awaiting
197 * the response. If sleeping is allowed we use progressive backoff
198 * otherwise we spin.
199 *
200 * The return value is 0 on success or a negative errno on failure. A
201 * failure can happen either because we are not able to execute the
202 * command or FW executes it but signals an error. In the latter case
203 * the return value is the error code indicated by FW (negated).
204 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok)
{
        /* progressive backoff schedule in ms; the last entry repeats */
        static int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };

        u32 v;
        u64 res;
        int i, ms, delay_idx;
        const __be64 *p = cmd;
        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

        /* commands must be 16-byte multiples and fit in the mailbox */
        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;

        /* try a few times to claim ownership of the mailbox */
        v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

        if (v != MBOX_OWNER_DRV)
                return v ? -EBUSY : -ETIMEDOUT;

        /* copy the command in, then hand the mailbox to the FW */
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

        t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);          /* flush write */

        delay_idx = 0;
        ms = delay[0];

        /* poll for the reply, sleeping with backoff or busy-waiting */
        for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else
                        mdelay(ms);

                v = t4_read_reg(adap, ctl_reg);
                if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
                        if (!(v & MBMSGVALID)) {
                                /* ownership returned without a message:
                                 * release the mailbox and keep waiting */
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }

                        res = t4_read_reg64(adap, data_reg);
                        if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
                                /* FW sent an assertion instead of a reply */
                                fw_asrt(adap, data_reg);
                                res = FW_CMD_RETVAL(EIO);
                        } else if (rpl)
                                get_mbox_rpl(adap, rpl, size / 8, data_reg);

                        if (FW_CMD_RETVAL_GET((int)res))
                                dump_mbox(adap, mbox, data_reg);
                        t4_write_reg(adap, ctl_reg, 0);
                        /* FW errors come back negated per driver convention */
                        return -FW_CMD_RETVAL_GET((int)res);
                }
        }

        dump_mbox(adap, mbox, data_reg);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        return -ETIMEDOUT;
}
273
274/**
275 * t4_mc_read - read from MC through backdoor accesses
276 * @adap: the adapter
277 * @addr: address of first byte requested
278 * @data: 64 bytes of data containing the requested address
279 * @ecc: where to store the corresponding 64-bit ECC word
280 *
281 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
282 * that covers the requested address @addr. If @parity is not %NULL it
283 * is assigned the 64-bit ECC word for the read data.
284 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        /* bail if a BIST operation is already in flight */
        if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
                return -EBUSY;
        /* program a 64-byte-aligned backdoor read via the BIST engine */
        t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
        t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
        t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
        t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
                     BIST_CMD_GAP(1));
        i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

        /* NOTE(review): the status regs appear to hold the data in reverse
         * word order — confirm against the MC BIST register docs */
        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
        return 0;
}
309
310/**
311 * t4_edc_read - read from EDC through backdoor accesses
312 * @adap: the adapter
313 * @idx: which EDC to access
314 * @addr: address of first byte requested
315 * @data: 64 bytes of data containing the requested address
316 * @ecc: where to store the corresponding 64-bit ECC word
317 *
318 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
319 * that covers the requested address @addr. If @parity is not %NULL it
320 * is assigned the 64-bit ECC word for the read data.
321 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
        int i;

        /* the two EDC instances are at a fixed register stride apart */
        idx *= EDC_STRIDE;
        if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
                return -EBUSY;
        /* program a 64-byte-aligned backdoor read via the BIST engine */
        t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
        t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
        t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
        t4_write_reg(adap, EDC_BIST_CMD + idx,
                     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
        i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
        if (i)
                return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

        /* NOTE(review): data appears to come back in reverse word order,
         * mirroring t4_mc_read() — confirm against the EDC BIST docs */
        for (i = 15; i >= 0; i--)
                *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
        if (ecc)
                *ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
        return 0;
}
347
348#define VPD_ENTRY(name, len) \
349 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
350
351/*
352 * Partial EEPROM Vital Product Data structure. Includes only the ID and
353 * VPD-R sections.
354 */
struct t4_vpd {
        u8 id_tag;                 /* ID-string section tag */
        u8 id_len[2];              /* ID-string length (little-endian) */
        u8 id_data[ID_LEN];        /* product identifier string */
        u8 vpdr_tag;               /* VPD-R section tag */
        u8 vpdr_len[2];            /* VPD-R section length */
        VPD_ENTRY(pn, 16);                     /* part number */
        VPD_ENTRY(ec, EC_LEN);                 /* EC level */
        VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
        VPD_ENTRY(na, 12);                     /* MAC address base */
        VPD_ENTRY(port_type, 8);               /* port types */
        VPD_ENTRY(gpio, 14);                   /* GPIO usage */
        VPD_ENTRY(cclk, 6);                    /* core clock */
        VPD_ENTRY(port_addr, 8);               /* port MDIO addresses */
        VPD_ENTRY(rv, 1);                      /* csum */
        u32 pad;                  /* for multiple-of-4 sizing and alignment */
};
372
373#define EEPROM_STAT_ADDR 0x7bfc
374#define VPD_BASE 0
375
376/**
377 * t4_seeprom_wp - enable/disable EEPROM write protection
378 * @adapter: the adapter
379 * @enable: whether to enable or disable write protection
380 *
381 * Enables or disables write protection on the serial EEPROM.
382 */
383int t4_seeprom_wp(struct adapter *adapter, bool enable)
384{
385 unsigned int v = enable ? 0xc : 0;
386 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
387 return ret < 0 ? ret : 0;
388}
389
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int ret;
	struct t4_vpd vpd;
	u8 *q = (u8 *)&vpd, csum;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
	if (ret < 0)
		return ret;

	/* Sum every byte up to and including the stored checksum byte
	 * (rv_data); a valid image sums to zero modulo 256.
	 */
	for (csum = 0; q <= vpd.rv_data; q++)
		csum += *q;

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	/* NOTE(review): cclk_data is a fixed 6-byte field with no NUL
	 * terminator of its own; simple_strtoul relies on a non-digit
	 * byte following it in the struct — confirm.
	 */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	/* Copy the fixed-width fields and trim trailing whitespace. */
	memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
	strim(p->id);
	memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
	strim(p->ec);
	memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
	strim(p->sn);
	return 0;
}
425
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	/* location and size of the firmware image in serial flash */
	FW_START_SEC = 8,             /* first flash sector for FW */
	FW_END_SEC = 15,              /* last flash sector for FW */
	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,  /* FW byte offset */
	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
};
443
444/**
445 * sf1_read - read data from the serial flash
446 * @adapter: the adapter
447 * @byte_cnt: number of bytes to read
448 * @cont: whether another operation will be chained
449 * @lock: whether to lock SF for PL access only
450 * @valp: where to store the read data
451 *
452 * Reads up to 4 bytes of data from the serial flash. The location of
453 * the read needs to be specified prior to calling this by issuing the
454 * appropriate commands to the serial flash.
455 */
456static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
457 int lock, u32 *valp)
458{
459 int ret;
460
461 if (!byte_cnt || byte_cnt > 4)
462 return -EINVAL;
463 if (t4_read_reg(adapter, SF_OP) & BUSY)
464 return -EBUSY;
465 cont = cont ? SF_CONT : 0;
466 lock = lock ? SF_LOCK : 0;
467 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
468 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
469 if (!ret)
470 *valp = t4_read_reg(adapter, SF_DATA);
471 return ret;
472}
473
474/**
475 * sf1_write - write data to the serial flash
476 * @adapter: the adapter
477 * @byte_cnt: number of bytes to write
478 * @cont: whether another operation will be chained
479 * @lock: whether to lock SF for PL access only
480 * @val: value to write
481 *
482 * Writes up to 4 bytes of data to the serial flash. The location of
483 * the write needs to be specified prior to calling this by issuing the
484 * appropriate commands to the serial flash.
485 */
486static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
487 int lock, u32 val)
488{
489 if (!byte_cnt || byte_cnt > 4)
490 return -EINVAL;
491 if (t4_read_reg(adapter, SF_OP) & BUSY)
492 return -EBUSY;
493 cont = cont ? SF_CONT : 0;
494 lock = lock ? SF_LOCK : 0;
495 t4_write_reg(adapter, SF_DATA, val);
496 t4_write_reg(adapter, SF_OP, lock |
497 cont | BYTECNT(byte_cnt - 1) | OP_WR);
498 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
499}
500
501/**
502 * flash_wait_op - wait for a flash operation to complete
503 * @adapter: the adapter
504 * @attempts: max number of polls of the status register
505 * @delay: delay between polls in ms
506 *
507 * Wait for a flash operation to complete by polling the status register.
508 */
509static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
510{
511 int ret;
512 u32 status;
513
514 while (1) {
515 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
516 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
517 return ret;
518 if (!(status & 1))
519 return 0;
520 if (--attempts == 0)
521 return -EAGAIN;
522 if (delay)
523 msleep(delay);
524 }
525}
526
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianess.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* Reject reads past end of flash or 32-bit-unaligned addresses. */
	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* Command word: byte-swapped address combined with the fast-read
	 * opcode in the low byte.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Issue the command, then one single-byte read before the data
	 * loop — presumably the fast-read dummy byte; confirm against the
	 * SF controller documentation.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* cont while more words remain; lock only on the last */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);	/* store big-endian */
	}
	return 0;
}
565
/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 * After programming, the page is read back and compared against @data
 * to verify the write.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];	/* one SF page (ARRAY_SIZE(buf) * 4 bytes) */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie entirely within one flash page. */
	if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Command word: byte-swapped address + program-page opcode. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time, big-endian byte order;
	 * cont stays set until the final chunk.
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 5, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);                    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data has advanced by n bytes; rewind to compare what we wrote. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);                    /* unlock SF */
	return ret;
}
624
625/**
626 * get_fw_version - read the firmware version
627 * @adapter: the adapter
628 * @vers: where to place the version
629 *
630 * Reads the FW version from flash.
631 */
632static int get_fw_version(struct adapter *adapter, u32 *vers)
633{
634 return t4_read_flash(adapter,
635 FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
636 vers, 0);
637}
638
639/**
640 * get_tp_version - read the TP microcode version
641 * @adapter: the adapter
642 * @vers: where to place the version
643 *
644 * Reads the TP microcode version from flash.
645 */
646static int get_tp_version(struct adapter *adapter, u32 *vers)
647{
648 return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
649 tp_microcode_ver),
650 1, vers, 0);
651}
652
653/**
654 * t4_check_fw_version - check if the FW is compatible with this driver
655 * @adapter: the adapter
656 *
657 * Checks if an adapter's FW is compatible with the driver. Returns 0
658 * if there's exact match, a negative error if the version could not be
659 * read or there's a major version mismatch, and a positive value if the
660 * expected major version is found but there's a minor version mismatch.
661 */
662int t4_check_fw_version(struct adapter *adapter)
663{
664 u32 api_vers[2];
665 int ret, major, minor, micro;
666
667 ret = get_fw_version(adapter, &adapter->params.fw_vers);
668 if (!ret)
669 ret = get_tp_version(adapter, &adapter->params.tp_vers);
670 if (!ret)
671 ret = t4_read_flash(adapter,
672 FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
673 2, api_vers, 1);
674 if (ret)
675 return ret;
676
677 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
678 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
679 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
680 memcpy(adapter->params.api_vers, api_vers,
681 sizeof(adapter->params.api_vers));
682
683 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
684 dev_err(adapter->pdev_dev,
685 "card FW has major version %u, driver wants %u\n",
686 major, FW_VERSION_MAJOR);
687 return -EINVAL;
688 }
689
690 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
691 return 0; /* perfect match */
692
693 /* Minor/micro version mismatch. Report it but often it's OK. */
694 return 1;
695}
696
697/**
698 * t4_flash_erase_sectors - erase a range of flash sectors
699 * @adapter: the adapter
700 * @start: the first sector to erase
701 * @end: the last sector to erase
702 *
703 * Erases the sectors in the given inclusive range.
704 */
705static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
706{
707 int ret = 0;
708
709 while (start <= end) {
710 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
711 (ret = sf1_write(adapter, 4, 0, 1,
712 SF_ERASE_SECTOR | (start << 8))) != 0 ||
713 (ret = flash_wait_op(adapter, 5, 500)) != 0) {
714 dev_err(adapter->pdev_dev,
715 "erase of flash sector %d failed, error %d\n",
716 start, ret);
717 break;
718 }
719 start++;
720 }
721 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
722 return ret;
723}
724
/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.  The
 * image is validated (size, header, checksum) before any flash sector
 * is touched, and the real FW version is written last so a partially
 * written image is detectable.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;

	/* Validate the image before erasing anything. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* The 32-bit word sum of a valid image is all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, SF_SEC_SIZE);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* Write the remaining pages; size is a multiple of 512 so it
	 * cannot underflow SF_PAGE_SIZE steps here.
	 */
	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally write the real FW version to mark the image valid. */
	ret = t4_write_flash(adap,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
805
/* link capabilities the driver can advertise during autonegotiation */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	/* Translate the requested pause settings into FW capability bits. */
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* no autoneg support: advertise everything supported */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* autoneg forced off: request the configured speed */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
851
852/**
853 * t4_restart_aneg - restart autonegotiation
854 * @adap: the adapter
855 * @mbox: mbox to use for the FW command
856 * @port: the port id
857 *
858 * Restarts autonegotiation for the selected port.
859 */
860int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
861{
862 struct fw_port_cmd c;
863
864 memset(&c, 0, sizeof(c));
865 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
866 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
867 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
868 FW_LEN16(c));
869 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
870 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
871}
872
873/**
874 * t4_set_vlan_accel - configure HW VLAN extraction
875 * @adap: the adapter
876 * @ports: bitmap of adapter ports to operate on
877 * @on: enable (1) or disable (0) HW VLAN extraction
878 *
879 * Enables or disables HW extraction of VLAN tags for the ports specified
880 * by @ports. @ports is a bitmap with the ith bit designating the port
881 * associated with the ith adapter channel.
882 */
883void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
884{
885 ports <<= VLANEXTENABLE_SHIFT;
886 t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
887}
888
/*
 * One row of a table-driven interrupt-cause dispatch table used by
 * t4_handle_intr_status().  A row with mask 0 terminates a table.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
895
/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is
 * terminated by an entry specifying mask 0.  Returns the number of
 * fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;	/* union of all table masks that matched */
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			/* non-fatal messages are rate-limited */
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
933
934/*
935 * Interrupt handler for the PCIE module.
936 */
937static void pcie_intr_handler(struct adapter *adapter)
938{
939 static struct intr_info sysbus_intr_info[] = {
940 { RNPP, "RXNP array parity error", -1, 1 },
941 { RPCP, "RXPC array parity error", -1, 1 },
942 { RCIP, "RXCIF array parity error", -1, 1 },
943 { RCCP, "Rx completions control array parity error", -1, 1 },
944 { RFTP, "RXFT array parity error", -1, 1 },
945 { 0 }
946 };
947 static struct intr_info pcie_port_intr_info[] = {
948 { TPCP, "TXPC array parity error", -1, 1 },
949 { TNPP, "TXNP array parity error", -1, 1 },
950 { TFTP, "TXFT array parity error", -1, 1 },
951 { TCAP, "TXCA array parity error", -1, 1 },
952 { TCIP, "TXCIF array parity error", -1, 1 },
953 { RCAP, "RXCA array parity error", -1, 1 },
954 { OTDD, "outbound request TLP discarded", -1, 1 },
955 { RDPE, "Rx data parity error", -1, 1 },
956 { TDUE, "Tx uncorrectable data error", -1, 1 },
957 { 0 }
958 };
959 static struct intr_info pcie_intr_info[] = {
960 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
961 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
962 { MSIDATAPERR, "MSI data parity error", -1, 1 },
963 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
964 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
965 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
966 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
967 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
968 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
969 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
970 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
971 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
972 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
973 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
974 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
975 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
976 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
977 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
978 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
979 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
980 { FIDPERR, "PCI FID parity error", -1, 1 },
981 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
982 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
983 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
984 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
985 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
986 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
987 { PCIESINT, "PCI core secondary fault", -1, 1 },
988 { PCIEPINT, "PCI core primary fault", -1, 1 },
989 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
990 { 0 }
991 };
992
993 int fat;
994
995 fat = t4_handle_intr_status(adapter,
996 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
997 sysbus_intr_info) +
998 t4_handle_intr_status(adapter,
999 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1000 pcie_port_intr_info) +
1001 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1002 if (fat)
1003 t4_fatal_err(adapter);
1004}
1005
1006/*
1007 * TP interrupt handler.
1008 */
1009static void tp_intr_handler(struct adapter *adapter)
1010{
1011 static struct intr_info tp_intr_info[] = {
1012 { 0x3fffffff, "TP parity error", -1, 1 },
1013 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1014 { 0 }
1015 };
1016
1017 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1018 t4_fatal_err(adapter);
1019}
1020
1021/*
1022 * SGE interrupt handler.
1023 */
1024static void sge_intr_handler(struct adapter *adapter)
1025{
1026 u64 v;
1027
1028 static struct intr_info sge_intr_info[] = {
1029 { ERR_CPL_EXCEED_IQE_SIZE,
1030 "SGE received CPL exceeding IQE size", -1, 1 },
1031 { ERR_INVALID_CIDX_INC,
1032 "SGE GTS CIDX increment too large", -1, 0 },
1033 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1034 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1035 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1036 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1037 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1038 0 },
1039 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1040 0 },
1041 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1042 0 },
1043 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1044 0 },
1045 { ERR_ING_CTXT_PRIO,
1046 "SGE too many priority ingress contexts", -1, 0 },
1047 { ERR_EGR_CTXT_PRIO,
1048 "SGE too many priority egress contexts", -1, 0 },
1049 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1050 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1051 { 0 }
1052 };
1053
1054 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1055 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1056 if (v) {
1057 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1058 (unsigned long long)v);
1059 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1060 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1061 }
1062
1063 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1064 v != 0)
1065 t4_fatal_err(adapter);
1066}
1067
1068/*
1069 * CIM interrupt handler.
1070 */
1071static void cim_intr_handler(struct adapter *adapter)
1072{
1073 static struct intr_info cim_intr_info[] = {
1074 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1075 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1076 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1077 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1078 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1079 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1080 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1081 { 0 }
1082 };
1083 static struct intr_info cim_upintr_info[] = {
1084 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1085 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1086 { ILLWRINT, "CIM illegal write", -1, 1 },
1087 { ILLRDINT, "CIM illegal read", -1, 1 },
1088 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1089 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1090 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1091 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1092 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1093 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1094 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1095 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1096 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1097 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1098 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1099 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1100 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1101 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1102 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1103 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1104 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1105 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1106 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1107 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1108 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1109 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1110 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1111 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1112 { 0 }
1113 };
1114
1115 int fat;
1116
1117 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1118 cim_intr_info) +
1119 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1120 cim_upintr_info);
1121 if (fat)
1122 t4_fatal_err(adapter);
1123}
1124
1125/*
1126 * ULP RX interrupt handler.
1127 */
1128static void ulprx_intr_handler(struct adapter *adapter)
1129{
1130 static struct intr_info ulprx_intr_info[] = {
1131 { 0x7fffff, "ULPRX parity error", -1, 1 },
1132 { 0 }
1133 };
1134
1135 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1136 t4_fatal_err(adapter);
1137}
1138
1139/*
1140 * ULP TX interrupt handler.
1141 */
1142static void ulptx_intr_handler(struct adapter *adapter)
1143{
1144 static struct intr_info ulptx_intr_info[] = {
1145 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1146 0 },
1147 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1148 0 },
1149 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1150 0 },
1151 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1152 0 },
1153 { 0xfffffff, "ULPTX parity error", -1, 1 },
1154 { 0 }
1155 };
1156
1157 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1158 t4_fatal_err(adapter);
1159}
1160
1161/*
1162 * PM TX interrupt handler.
1163 */
1164static void pmtx_intr_handler(struct adapter *adapter)
1165{
1166 static struct intr_info pmtx_intr_info[] = {
1167 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1168 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1169 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1170 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1171 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1172 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1173 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1174 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1175 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1176 { 0 }
1177 };
1178
1179 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1180 t4_fatal_err(adapter);
1181}
1182
1183/*
1184 * PM RX interrupt handler.
1185 */
1186static void pmrx_intr_handler(struct adapter *adapter)
1187{
1188 static struct intr_info pmrx_intr_info[] = {
1189 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1190 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1191 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1192 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1193 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1194 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1195 { 0 }
1196 };
1197
1198 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1199 t4_fatal_err(adapter);
1200}
1201
1202/*
1203 * CPL switch interrupt handler.
1204 */
1205static void cplsw_intr_handler(struct adapter *adapter)
1206{
1207 static struct intr_info cplsw_intr_info[] = {
1208 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1209 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1210 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1211 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1212 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1213 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1214 { 0 }
1215 };
1216
1217 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1218 t4_fatal_err(adapter);
1219}
1220
1221/*
1222 * LE interrupt handler.
1223 */
1224static void le_intr_handler(struct adapter *adap)
1225{
1226 static struct intr_info le_intr_info[] = {
1227 { LIPMISS, "LE LIP miss", -1, 0 },
1228 { LIP0, "LE 0 LIP error", -1, 0 },
1229 { PARITYERR, "LE parity error", -1, 1 },
1230 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1231 { REQQPARERR, "LE request queue parity error", -1, 1 },
1232 { 0 }
1233 };
1234
1235 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1236 t4_fatal_err(adap);
1237}
1238
1239/*
1240 * MPS interrupt handler.
1241 */
1242static void mps_intr_handler(struct adapter *adapter)
1243{
1244 static struct intr_info mps_rx_intr_info[] = {
1245 { 0xffffff, "MPS Rx parity error", -1, 1 },
1246 { 0 }
1247 };
1248 static struct intr_info mps_tx_intr_info[] = {
1249 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1250 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1251 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1252 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1253 { BUBBLE, "MPS Tx underflow", -1, 1 },
1254 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1255 { FRMERR, "MPS Tx framing error", -1, 1 },
1256 { 0 }
1257 };
1258 static struct intr_info mps_trc_intr_info[] = {
1259 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1260 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1261 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1262 { 0 }
1263 };
1264 static struct intr_info mps_stat_sram_intr_info[] = {
1265 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1266 { 0 }
1267 };
1268 static struct intr_info mps_stat_tx_intr_info[] = {
1269 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1270 { 0 }
1271 };
1272 static struct intr_info mps_stat_rx_intr_info[] = {
1273 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1274 { 0 }
1275 };
1276 static struct intr_info mps_cls_intr_info[] = {
1277 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1278 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1279 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1280 { 0 }
1281 };
1282
1283 int fat;
1284
1285 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1286 mps_rx_intr_info) +
1287 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1288 mps_tx_intr_info) +
1289 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1290 mps_trc_intr_info) +
1291 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1292 mps_stat_sram_intr_info) +
1293 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1294 mps_stat_tx_intr_info) +
1295 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1296 mps_stat_rx_intr_info) +
1297 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1298 mps_cls_intr_info);
1299
1300 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1301 RXINT | TXINT | STATINT);
1302 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1303 if (fat)
1304 t4_fatal_err(adapter);
1305}
1306
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory controller:
 * MEM_EDC0/MEM_EDC1 or, for larger values, the MC.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status registers for the selected memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		/* Correctable ECC errors: read the count, then reset it. */
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the handled causes; parity and uncorrectable ECC are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
1347
1348/*
1349 * MA interrupt handler.
1350 */
1351static void ma_intr_handler(struct adapter *adap)
1352{
1353 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1354
1355 if (status & MEM_PERR_INT_CAUSE)
1356 dev_alert(adap->pdev_dev,
1357 "MA parity error, parity status %#x\n",
1358 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1359 if (status & MEM_WRAP_INT_CAUSE) {
1360 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1361 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1362 "client %u to address %#x\n",
1363 MEM_WRAP_CLIENT_NUM_GET(v),
1364 MEM_WRAP_ADDRESS_GET(v) << 4);
1365 }
1366 t4_write_reg(adap, MA_INT_CAUSE, status);
1367 t4_fatal_err(adap);
1368}
1369
1370/*
1371 * SMB interrupt handler.
1372 */
1373static void smb_intr_handler(struct adapter *adap)
1374{
1375 static struct intr_info smb_intr_info[] = {
1376 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1377 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1378 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1379 { 0 }
1380 };
1381
1382 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1383 t4_fatal_err(adap);
1384}
1385
1386/*
1387 * NC-SI interrupt handler.
1388 */
1389static void ncsi_intr_handler(struct adapter *adap)
1390{
1391 static struct intr_info ncsi_intr_info[] = {
1392 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1393 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1394 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1395 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1396 { 0 }
1397 };
1398
1399 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1400 t4_fatal_err(adap);
1401}
1402
1403/*
1404 * XGMAC interrupt handler.
1405 */
1406static void xgmac_intr_handler(struct adapter *adap, int port)
1407{
1408 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1409
1410 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1411 if (!v)
1412 return;
1413
1414 if (v & TXFIFO_PRTY_ERR)
1415 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1416 port);
1417 if (v & RXFIFO_PRTY_ERR)
1418 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1419 port);
1420 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1421 t4_fatal_err(adap);
1422}
1423
1424/*
1425 * PL interrupt handler.
1426 */
1427static void pl_intr_handler(struct adapter *adap)
1428{
1429 static struct intr_info pl_intr_info[] = {
1430 { FATALPERR, "T4 fatal parity error", -1, 1 },
1431 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1432 { 0 }
1433 };
1434
1435 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1436 t4_fatal_err(adap);
1437}
1438
/* PF-level interrupt sources enabled via PL_PF_INT_ENABLE. */
#define PF_INTR_MASK (PFSW | PFCIM)
/* Global interrupt sources the master PF owns; used to gate and to clear
 * PL_INT_CAUSE in t4_slow_intr_handler().
 */
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)
1443
1444/**
1445 * t4_slow_intr_handler - control path interrupt handler
1446 * @adapter: the adapter
1447 *
1448 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1449 * The designation 'slow' is because it involves register reads, while
1450 * data interrupts typically don't involve any MMIOs.
1451 */
1452int t4_slow_intr_handler(struct adapter *adapter)
1453{
1454 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1455
1456 if (!(cause & GLBL_INTR_MASK))
1457 return 0;
1458 if (cause & CIM)
1459 cim_intr_handler(adapter);
1460 if (cause & MPS)
1461 mps_intr_handler(adapter);
1462 if (cause & NCSI)
1463 ncsi_intr_handler(adapter);
1464 if (cause & PL)
1465 pl_intr_handler(adapter);
1466 if (cause & SMB)
1467 smb_intr_handler(adapter);
1468 if (cause & XGMAC0)
1469 xgmac_intr_handler(adapter, 0);
1470 if (cause & XGMAC1)
1471 xgmac_intr_handler(adapter, 1);
1472 if (cause & XGMAC_KR0)
1473 xgmac_intr_handler(adapter, 2);
1474 if (cause & XGMAC_KR1)
1475 xgmac_intr_handler(adapter, 3);
1476 if (cause & PCIE)
1477 pcie_intr_handler(adapter);
1478 if (cause & MC)
1479 mem_intr_handler(adapter, MEM_MC);
1480 if (cause & EDC0)
1481 mem_intr_handler(adapter, MEM_EDC0);
1482 if (cause & EDC1)
1483 mem_intr_handler(adapter, MEM_EDC1);
1484 if (cause & LE)
1485 le_intr_handler(adapter);
1486 if (cause & TP)
1487 tp_intr_handler(adapter);
1488 if (cause & MA)
1489 ma_intr_handler(adapter);
1490 if (cause & PM_TX)
1491 pmtx_intr_handler(adapter);
1492 if (cause & PM_RX)
1493 pmrx_intr_handler(adapter);
1494 if (cause & ULP_RX)
1495 ulprx_intr_handler(adapter);
1496 if (cause & CPL_SWITCH)
1497 cplsw_intr_handler(adapter);
1498 if (cause & SGE)
1499 sge_intr_handler(adapter);
1500 if (cause & ULP_TX)
1501 ulptx_intr_handler(adapter);
1502
1503 /* Clear the interrupts just processed for which we are the master. */
1504 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1505 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1506 return 1;
1507}
1508
1509/**
1510 * t4_intr_enable - enable interrupts
1511 * @adapter: the adapter whose interrupts should be enabled
1512 *
1513 * Enable PF-specific interrupts for the calling function and the top-level
1514 * interrupt concentrator for global interrupts. Interrupts are already
1515 * enabled at each module, here we just enable the roots of the interrupt
1516 * hierarchies.
1517 *
1518 * Note: this function should be called only when the driver manages
1519 * non PF-specific interrupts from the various HW modules. Only one PCI
1520 * function at a time should be doing this.
1521 */
1522void t4_intr_enable(struct adapter *adapter)
1523{
1524 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1525
1526 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
1527 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
1528 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
1529 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
1530 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
1531 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
1532 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
1533 EGRESS_SIZE_ERR);
1534 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
1535 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
1536}
1537
1538/**
1539 * t4_intr_disable - disable interrupts
1540 * @adapter: the adapter whose interrupts should be disabled
1541 *
1542 * Disable interrupts. We only disable the top-level interrupt
1543 * concentrators. The caller must be a PCI function managing global
1544 * interrupts.
1545 */
1546void t4_intr_disable(struct adapter *adapter)
1547{
1548 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
1549
1550 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
1551 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
1552}
1553
1554/**
1555 * t4_intr_clear - clear all interrupts
1556 * @adapter: the adapter whose interrupts should be cleared
1557 *
1558 * Clears all interrupts. The caller must be a PCI function managing
1559 * global interrupts.
1560 */
1561void t4_intr_clear(struct adapter *adapter)
1562{
1563 static const unsigned int cause_reg[] = {
1564 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1565 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1566 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1567 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1568 MC_INT_CAUSE,
1569 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1570 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1571 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1572 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1573 TP_INT_CAUSE,
1574 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1575 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1576 MPS_RX_PERR_INT_CAUSE,
1577 CPL_INTR_CAUSE,
1578 MYPF_REG(PL_PF_INT_CAUSE),
1579 PL_PL_INT_CAUSE,
1580 LE_DB_INT_CAUSE,
1581 };
1582
1583 unsigned int i;
1584
1585 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1586 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
1587
1588 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1589 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1590}
1591
1592/**
1593 * hash_mac_addr - return the hash value of a MAC address
1594 * @addr: the 48-bit Ethernet MAC address
1595 *
1596 * Hashes a MAC address according to the hash function used by HW inexact
1597 * (hash) address matching.
1598 */
1599static int hash_mac_addr(const u8 *addr)
1600{
1601 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1602 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1603 a ^= b;
1604 a ^= (a >> 12);
1605 a ^= (a >> 6);
1606 return a & 0x3f;
1607}
1608
1609/**
1610 * t4_config_rss_range - configure a portion of the RSS mapping table
1611 * @adapter: the adapter
1612 * @mbox: mbox to use for the FW command
1613 * @viid: virtual interface whose RSS subtable is to be written
1614 * @start: start entry in the table to write
1615 * @n: how many table entries to write
1616 * @rspq: values for the response queue lookup table
1617 * @nrspq: number of values in @rspq
1618 *
1619 * Programs the selected part of the VI's RSS mapping table with the
1620 * provided values. If @nrspq < @n the supplied values are used repeatedly
1621 * until the full table range is populated.
1622 *
1623 * The caller must ensure the values in @rspq are in the range allowed for
1624 * @viid.
1625 */
1626int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1627 int start, int n, const u16 *rspq, unsigned int nrspq)
1628{
1629 int ret;
1630 const u16 *rsp = rspq;
1631 const u16 *rsp_end = rspq + nrspq;
1632 struct fw_rss_ind_tbl_cmd cmd;
1633
1634 memset(&cmd, 0, sizeof(cmd));
1635 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
1636 FW_CMD_REQUEST | FW_CMD_WRITE |
1637 FW_RSS_IND_TBL_CMD_VIID(viid));
1638 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1639
1640 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
1641 while (n > 0) {
1642 int nq = min(n, 32);
1643 __be32 *qp = &cmd.iq0_to_iq2;
1644
1645 cmd.niqid = htons(nq);
1646 cmd.startidx = htons(start);
1647
1648 start += nq;
1649 n -= nq;
1650
1651 while (nq > 0) {
1652 unsigned int v;
1653
1654 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
1655 if (++rsp >= rsp_end)
1656 rsp = rspq;
1657 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
1658 if (++rsp >= rsp_end)
1659 rsp = rspq;
1660 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
1661 if (++rsp >= rsp_end)
1662 rsp = rspq;
1663
1664 *qp++ = htonl(v);
1665 nq -= 3;
1666 }
1667
1668 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
1669 if (ret)
1670 return ret;
1671 }
1672 return 0;
1673}
1674
1675/**
1676 * t4_config_glbl_rss - configure the global RSS mode
1677 * @adapter: the adapter
1678 * @mbox: mbox to use for the FW command
1679 * @mode: global RSS mode
1680 * @flags: mode-specific flags
1681 *
1682 * Sets the global RSS mode.
1683 */
1684int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1685 unsigned int flags)
1686{
1687 struct fw_rss_glb_config_cmd c;
1688
1689 memset(&c, 0, sizeof(c));
1690 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1691 FW_CMD_REQUEST | FW_CMD_WRITE);
1692 c.retval_len16 = htonl(FW_LEN16(c));
1693 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1694 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1695 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1696 c.u.basicvirtual.mode_pkd =
1697 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1698 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1699 } else
1700 return -EINVAL;
1701 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1702}
1703
/* Read an RSS table row.  Writes the row index (with the high control bits
 * set -- NOTE(review): 0xfff00000 presumably selects read mode, verify
 * against the TP_RSS_LKP_TABLE register spec), then waits for LKPTBLROWVLD
 * and returns the register contents in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
				   5, 0, val);
}
1711
1712/**
1713 * t4_read_rss - read the contents of the RSS mapping table
1714 * @adapter: the adapter
1715 * @map: holds the contents of the RSS mapping table
1716 *
1717 * Reads the contents of the RSS hash->queue mapping table.
1718 */
1719int t4_read_rss(struct adapter *adapter, u16 *map)
1720{
1721 u32 val;
1722 int i, ret;
1723
1724 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1725 ret = rd_rss_row(adapter, i, &val);
1726 if (ret)
1727 return ret;
1728 *map++ = LKPTBLQUEUE0_GET(val);
1729 *map++ = LKPTBLQUEUE1_GET(val);
1730 }
1731 return 0;
1732}
1733
1734/**
1735 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1736 * @adap: the adapter
1737 * @v4: holds the TCP/IP counter values
1738 * @v6: holds the TCP/IPv6 counter values
1739 *
1740 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1741 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1742 */
1743void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1744 struct tp_tcp_stats *v6)
1745{
1746 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1747
1748#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1749#define STAT(x) val[STAT_IDX(x)]
1750#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1751
1752 if (v4) {
1753 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1754 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1755 v4->tcpOutRsts = STAT(OUT_RST);
1756 v4->tcpInSegs = STAT64(IN_SEG);
1757 v4->tcpOutSegs = STAT64(OUT_SEG);
1758 v4->tcpRetransSegs = STAT64(RXT_SEG);
1759 }
1760 if (v6) {
1761 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1762 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1763 v6->tcpOutRsts = STAT(OUT_RST);
1764 v6->tcpInSegs = STAT64(IN_SEG);
1765 v6->tcpOutSegs = STAT64(OUT_SEG);
1766 v6->tcpRetransSegs = STAT64(RXT_SEG);
1767 }
1768#undef STAT64
1769#undef STAT
1770#undef STAT_IDX
1771}
1772
1773/**
1774 * t4_tp_get_err_stats - read TP's error MIB counters
1775 * @adap: the adapter
1776 * @st: holds the counter values
1777 *
1778 * Returns the values of TP's error counters.
1779 */
1780void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1781{
1782 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1783 12, TP_MIB_MAC_IN_ERR_0);
1784 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1785 8, TP_MIB_TNL_CNG_DROP_0);
1786 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1787 4, TP_MIB_TNL_DROP_0);
1788 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1789 4, TP_MIB_OFD_VLN_DROP_0);
1790 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1791 4, TP_MIB_TCP_V6IN_ERR_0);
1792 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1793 2, TP_MIB_OFD_ARP_DROP);
1794}
1795
1796/**
1797 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1798 * @adap: the adapter
1799 * @mtus: where to store the MTU values
1800 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1801 *
1802 * Reads the HW path MTU table.
1803 */
1804void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1805{
1806 u32 v;
1807 int i;
1808
1809 for (i = 0; i < NMTUS; ++i) {
1810 t4_write_reg(adap, TP_MTU_TABLE,
1811 MTUINDEX(0xff) | MTUVALUE(i));
1812 v = t4_read_reg(adap, TP_MTU_TABLE);
1813 mtus[i] = MTUVALUE_GET(v);
1814 if (mtu_log)
1815 mtu_log[i] = MTUWIDTH_GET(v);
1816 }
1817}
1818
1819/**
1820 * init_cong_ctrl - initialize congestion control parameters
1821 * @a: the alpha values for congestion control
1822 * @b: the beta values for congestion control
1823 *
1824 * Initialize the congestion control parameters.
1825 */
1826static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1827{
1828 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1829 a[9] = 2;
1830 a[10] = 3;
1831 a[11] = 4;
1832 a[12] = 5;
1833 a[13] = 6;
1834 a[14] = 7;
1835 a[15] = 8;
1836 a[16] = 9;
1837 a[17] = 10;
1838 a[18] = 14;
1839 a[19] = 17;
1840 a[20] = 21;
1841 a[21] = 25;
1842 a[22] = 30;
1843 a[23] = 35;
1844 a[24] = 45;
1845 a[25] = 60;
1846 a[26] = 80;
1847 a[27] = 100;
1848 a[28] = 200;
1849 a[29] = 300;
1850 a[30] = 400;
1851 a[31] = 500;
1852
1853 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1854 b[9] = b[10] = 1;
1855 b[11] = b[12] = 2;
1856 b[13] = b[14] = b[15] = b[16] = 3;
1857 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1858 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1859 b[28] = b[29] = 6;
1860 b[30] = b[31] = 7;
1861}
1862
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Nominal packets per congestion-control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round the width down if mtu is below 1.25 * 2^(log2-1). */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* 40 = headers subtracted from the MTU; clamp the
			 * additive increment to at least CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack: MTU index [21+], window [16..20],
			 * beta [13..15], increment [0..12].
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
1909
1910/**
1911 * t4_set_trace_filter - configure one of the tracing filters
1912 * @adap: the adapter
1913 * @tp: the desired trace filter parameters
1914 * @idx: which filter to configure
1915 * @enable: whether to enable or disable the filter
1916 *
1917 * Configures one of the tracing filters available in HW. If @enable is
1918 * %0 @tp is not examined and may be %NULL.
1919 */
1920int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1921 int idx, int enable)
1922{
1923 int i, ofst = idx * 4;
1924 u32 data_reg, mask_reg, cfg;
1925 u32 multitrc = TRCMULTIFILTER;
1926
1927 if (!enable) {
1928 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1929 goto out;
1930 }
1931
1932 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1933 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1934 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1935 return -EINVAL;
1936
1937 if (tp->snap_len > 256) { /* must be tracer 0 */
1938 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1939 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1940 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1941 return -EINVAL; /* other tracers are enabled */
1942 multitrc = 0;
1943 } else if (idx) {
1944 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1945 if (TFCAPTUREMAX_GET(i) > 256 &&
1946 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1947 return -EINVAL;
1948 }
1949
1950 /* stop the tracer we'll be changing */
1951 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1952
1953 /* disable tracing globally if running in the wrong single/multi mode */
1954 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1955 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1956 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1957 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1958 msleep(1);
1959 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1960 return -ETIMEDOUT;
1961 }
1962 /*
1963 * At this point either the tracing is enabled and in the right mode or
1964 * disabled.
1965 */
1966
1967 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1968 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1969 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1970
1971 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1972 t4_write_reg(adap, data_reg, tp->data[i]);
1973 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1974 }
1975 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1976 TFCAPTUREMAX(tp->snap_len) |
1977 TFMINPKTSIZE(tp->min_len));
1978 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1979 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1980 TFPORT(tp->port) | TFEN |
1981 (tp->invert ? TFINVERTMATCH : 0));
1982
1983 cfg &= ~TRCMULTIFILTER;
1984 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1985out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1986 return 0;
1987}
1988
1989/**
1990 * t4_get_trace_filter - query one of the tracing filters
1991 * @adap: the adapter
1992 * @tp: the current trace filter parameters
1993 * @idx: which trace filter to query
1994 * @enabled: non-zero if the filter is enabled
1995 *
1996 * Returns the current settings of one of the HW tracing filters.
1997 */
1998void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
1999 int *enabled)
2000{
2001 u32 ctla, ctlb;
2002 int i, ofst = idx * 4;
2003 u32 data_reg, mask_reg;
2004
2005 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2006 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2007
2008 *enabled = !!(ctla & TFEN);
2009 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2010 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2011 tp->skip_ofst = TFOFFSET_GET(ctla);
2012 tp->skip_len = TFLENGTH_GET(ctla);
2013 tp->invert = !!(ctla & TFINVERTMATCH);
2014 tp->port = TFPORT_GET(ctla);
2015
2016 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2017 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2018 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2019
2020 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2021 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2022 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2023 }
2024}
2025
2026/**
2027 * get_mps_bg_map - return the buffer groups associated with a port
2028 * @adap: the adapter
2029 * @idx: the port index
2030 *
2031 * Returns a bitmap indicating which MPS buffer groups are associated
2032 * with the given port. Bit i is set if buffer group i is used by the
2033 * port.
2034 */
2035static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2036{
2037 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2038
2039 if (n == 0)
2040 return idx == 0 ? 0xf : 0;
2041 if (n == 1)
2042 return idx < 2 ? (3 << (2 * idx)) : 0;
2043 return 1 << idx;
2044}
2045
2046/**
2047 * t4_get_port_stats - collect port statistics
2048 * @adap: the adapter
2049 * @idx: the port index
2050 * @p: the stats structure to fill
2051 *
2052 * Collect statistics related to the given port from HW.
2053 */
2054void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2055{
2056 u32 bgmap = get_mps_bg_map(adap, idx);
2057
2058#define GET_STAT(name) \
2059 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2060#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2061
2062 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2063 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2064 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2065 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2066 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2067 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2068 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2069 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2070 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2071 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2072 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2073 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2074 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2075 p->tx_drop = GET_STAT(TX_PORT_DROP);
2076 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2077 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2078 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2079 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2080 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2081 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2082 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2083 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2084 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2085
2086 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2087 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2088 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2089 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2090 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2091 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2092 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2093 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2094 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2095 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2096 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2097 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2098 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2099 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2100 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2101 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2102 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2103 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2104 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2105 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2106 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2107 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2108 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2109 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2110 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2111 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2112 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2113
2114 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2115 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2116 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2117 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2118 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2119 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2120 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2121 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2122
2123#undef GET_STAT
2124#undef GET_STAT_COM
2125}
2126
2127/**
2128 * t4_get_lb_stats - collect loopback port statistics
2129 * @adap: the adapter
2130 * @idx: the loopback port index
2131 * @p: the stats structure to fill
2132 *
2133 * Return HW statistics for the given loopback port.
2134 */
2135void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2136{
2137 u32 bgmap = get_mps_bg_map(adap, idx);
2138
2139#define GET_STAT(name) \
2140 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2141#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2142
2143 p->octets = GET_STAT(BYTES);
2144 p->frames = GET_STAT(FRAMES);
2145 p->bcast_frames = GET_STAT(BCAST);
2146 p->mcast_frames = GET_STAT(MCAST);
2147 p->ucast_frames = GET_STAT(UCAST);
2148 p->error_frames = GET_STAT(ERROR);
2149
2150 p->frames_64 = GET_STAT(64B);
2151 p->frames_65_127 = GET_STAT(65B_127B);
2152 p->frames_128_255 = GET_STAT(128B_255B);
2153 p->frames_256_511 = GET_STAT(256B_511B);
2154 p->frames_512_1023 = GET_STAT(512B_1023B);
2155 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2156 p->frames_1519_max = GET_STAT(1519B_MAX);
2157 p->drop = t4_read_reg(adap, PORT_REG(idx,
2158 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2159
2160 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2161 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2162 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2163 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2164 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2165 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2166 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2167 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2168
2169#undef GET_STAT
2170#undef GET_STAT_COM
2171}
2172
2173/**
2174 * t4_wol_magic_enable - enable/disable magic packet WoL
2175 * @adap: the adapter
2176 * @port: the physical port index
2177 * @addr: MAC address expected in magic packets, %NULL to disable
2178 *
2179 * Enables/disables magic packet wake-on-LAN for the selected port.
2180 */
2181void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2182 const u8 *addr)
2183{
2184 if (addr) {
2185 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2186 (addr[2] << 24) | (addr[3] << 16) |
2187 (addr[4] << 8) | addr[5]);
2188 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2189 (addr[0] << 8) | addr[1]);
2190 }
2191 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2192 addr ? MAGICEN : 0);
2193}
2194
2195/**
2196 * t4_wol_pat_enable - enable/disable pattern-based WoL
2197 * @adap: the adapter
2198 * @port: the physical port index
2199 * @map: bitmap of which HW pattern filters to set
2200 * @mask0: byte mask for bytes 0-63 of a packet
2201 * @mask1: byte mask for bytes 64-127 of a packet
2202 * @crc: Ethernet CRC for selected bytes
2203 * @enable: enable/disable switch
2204 *
2205 * Sets the pattern filters indicated in @map to mask out the bytes
2206 * specified in @mask0/@mask1 in received packets and compare the CRC of
2207 * the resulting packet against @crc. If @enable is %true pattern-based
2208 * WoL is enabled, otherwise disabled.
2209 */
2210int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2211 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2212{
2213 int i;
2214
2215 if (!enable) {
2216 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2217 PATEN, 0);
2218 return 0;
2219 }
2220 if (map > 0xff)
2221 return -EINVAL;
2222
2223#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2224
2225 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2226 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2227 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2228
2229 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2230 if (!(map & 1))
2231 continue;
2232
2233 /* write byte masks */
2234 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2235 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2236 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2237 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2238 return -ETIMEDOUT;
2239
2240 /* write CRC */
2241 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2242 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2243 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2244 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2245 return -ETIMEDOUT;
2246 }
2247#undef EPIO_REG
2248
2249 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2250 return 0;
2251}
2252
/* Initialize a firmware command header: opcode, request flag, the
 * read/write direction, and the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
2258
2259/**
2260 * t4_mdio_rd - read a PHY register through MDIO
2261 * @adap: the adapter
2262 * @mbox: mailbox to use for the FW command
2263 * @phy_addr: the PHY address
2264 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2265 * @reg: the register to read
2266 * @valp: where to store the value
2267 *
2268 * Issues a FW command through the given mailbox to read a PHY register.
2269 */
2270int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2271 unsigned int mmd, unsigned int reg, u16 *valp)
2272{
2273 int ret;
2274 struct fw_ldst_cmd c;
2275
2276 memset(&c, 0, sizeof(c));
2277 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2278 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2279 c.cycles_to_len16 = htonl(FW_LEN16(c));
2280 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2281 FW_LDST_CMD_MMD(mmd));
2282 c.u.mdio.raddr = htons(reg);
2283
2284 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2285 if (ret == 0)
2286 *valp = ntohs(c.u.mdio.rval);
2287 return ret;
2288}
2289
2290/**
2291 * t4_mdio_wr - write a PHY register through MDIO
2292 * @adap: the adapter
2293 * @mbox: mailbox to use for the FW command
2294 * @phy_addr: the PHY address
2295 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2296 * @reg: the register to write
2297 * @valp: value to write
2298 *
2299 * Issues a FW command through the given mailbox to write a PHY register.
2300 */
2301int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2302 unsigned int mmd, unsigned int reg, u16 val)
2303{
2304 struct fw_ldst_cmd c;
2305
2306 memset(&c, 0, sizeof(c));
2307 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2308 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2309 c.cycles_to_len16 = htonl(FW_LEN16(c));
2310 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2311 FW_LDST_CMD_MMD(mmd));
2312 c.u.mdio.raddr = htons(reg);
2313 c.u.mdio.rval = htons(val);
2314
2315 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2316}
2317
2318/**
2319 * t4_fw_hello - establish communication with FW
2320 * @adap: the adapter
2321 * @mbox: mailbox to use for the FW command
2322 * @evt_mbox: mailbox to receive async FW events
2323 * @master: specifies the caller's willingness to be the device master
2324 * @state: returns the current device state
2325 *
2326 * Issues a command to establish communication with FW.
2327 */
2328int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2329 enum dev_master master, enum dev_state *state)
2330{
2331 int ret;
2332 struct fw_hello_cmd c;
2333
2334 INIT_CMD(c, HELLO, WRITE);
2335 c.err_to_mbasyncnot = htonl(
2336 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2337 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2338 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2339 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2340
2341 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2342 if (ret == 0 && state) {
2343 u32 v = ntohl(c.err_to_mbasyncnot);
2344 if (v & FW_HELLO_CMD_INIT)
2345 *state = DEV_STATE_INIT;
2346 else if (v & FW_HELLO_CMD_ERR)
2347 *state = DEV_STATE_ERR;
2348 else
2349 *state = DEV_STATE_UNINIT;
2350 }
2351 return ret;
2352}
2353
2354/**
2355 * t4_fw_bye - end communication with FW
2356 * @adap: the adapter
2357 * @mbox: mailbox to use for the FW command
2358 *
2359 * Issues a command to terminate communication with FW.
2360 */
2361int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2362{
2363 struct fw_bye_cmd c;
2364
2365 INIT_CMD(c, BYE, WRITE);
2366 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2367}
2368
2369/**
2370 * t4_init_cmd - ask FW to initialize the device
2371 * @adap: the adapter
2372 * @mbox: mailbox to use for the FW command
2373 *
2374 * Issues a command to FW to partially initialize the device. This
2375 * performs initialization that generally doesn't depend on user input.
2376 */
2377int t4_early_init(struct adapter *adap, unsigned int mbox)
2378{
2379 struct fw_initialize_cmd c;
2380
2381 INIT_CMD(c, INITIALIZE, WRITE);
2382 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2383}
2384
2385/**
2386 * t4_fw_reset - issue a reset to FW
2387 * @adap: the adapter
2388 * @mbox: mailbox to use for the FW command
2389 * @reset: specifies the type of reset to perform
2390 *
2391 * Issues a reset command of the specified type to FW.
2392 */
2393int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2394{
2395 struct fw_reset_cmd c;
2396
2397 INIT_CMD(c, RESET, WRITE);
2398 c.val = htonl(reset);
2399 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2400}
2401
2402/**
2403 * t4_query_params - query FW or device parameters
2404 * @adap: the adapter
2405 * @mbox: mailbox to use for the FW command
2406 * @pf: the PF
2407 * @vf: the VF
2408 * @nparams: the number of parameters
2409 * @params: the parameter names
2410 * @val: the parameter values
2411 *
2412 * Reads the value of FW or device parameters. Up to 7 parameters can be
2413 * queried at once.
2414 */
2415int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2416 unsigned int vf, unsigned int nparams, const u32 *params,
2417 u32 *val)
2418{
2419 int i, ret;
2420 struct fw_params_cmd c;
2421 __be32 *p = &c.param[0].mnem;
2422
2423 if (nparams > 7)
2424 return -EINVAL;
2425
2426 memset(&c, 0, sizeof(c));
2427 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2428 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2429 FW_PARAMS_CMD_VFN(vf));
2430 c.retval_len16 = htonl(FW_LEN16(c));
2431 for (i = 0; i < nparams; i++, p += 2)
2432 *p = htonl(*params++);
2433
2434 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2435 if (ret == 0)
2436 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2437 *val++ = ntohl(*p);
2438 return ret;
2439}
2440
2441/**
2442 * t4_set_params - sets FW or device parameters
2443 * @adap: the adapter
2444 * @mbox: mailbox to use for the FW command
2445 * @pf: the PF
2446 * @vf: the VF
2447 * @nparams: the number of parameters
2448 * @params: the parameter names
2449 * @val: the parameter values
2450 *
2451 * Sets the value of FW or device parameters. Up to 7 parameters can be
2452 * specified at once.
2453 */
2454int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2455 unsigned int vf, unsigned int nparams, const u32 *params,
2456 const u32 *val)
2457{
2458 struct fw_params_cmd c;
2459 __be32 *p = &c.param[0].mnem;
2460
2461 if (nparams > 7)
2462 return -EINVAL;
2463
2464 memset(&c, 0, sizeof(c));
2465 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2466 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2467 FW_PARAMS_CMD_VFN(vf));
2468 c.retval_len16 = htonl(FW_LEN16(c));
2469 while (nparams--) {
2470 *p++ = htonl(*params++);
2471 *p++ = htonl(*val++);
2472 }
2473
2474 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2475}
2476
2477/**
2478 * t4_cfg_pfvf - configure PF/VF resource limits
2479 * @adap: the adapter
2480 * @mbox: mailbox to use for the FW command
2481 * @pf: the PF being configured
2482 * @vf: the VF being configured
2483 * @txq: the max number of egress queues
2484 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2485 * @rxqi: the max number of interrupt-capable ingress queues
2486 * @rxq: the max number of interruptless ingress queues
2487 * @tc: the PCI traffic class
2488 * @vi: the max number of virtual interfaces
2489 * @cmask: the channel access rights mask for the PF/VF
2490 * @pmask: the port access rights mask for the PF/VF
2491 * @nexact: the maximum number of exact MPS filters
2492 * @rcaps: read capabilities
2493 * @wxcaps: write/execute capabilities
2494 *
2495 * Configures resource limits and capabilities for a physical or virtual
2496 * function.
2497 */
2498int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2499 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2500 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2501 unsigned int vi, unsigned int cmask, unsigned int pmask,
2502 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2503{
2504 struct fw_pfvf_cmd c;
2505
2506 memset(&c, 0, sizeof(c));
2507 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2508 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2509 FW_PFVF_CMD_VFN(vf));
2510 c.retval_len16 = htonl(FW_LEN16(c));
2511 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2512 FW_PFVF_CMD_NIQ(rxq));
2513 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2514 FW_PFVF_CMD_PMASK(pmask) |
2515 FW_PFVF_CMD_NEQ(txq));
2516 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2517 FW_PFVF_CMD_NEXACTF(nexact));
2518 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2519 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2520 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2521 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2522}
2523
/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;	/* FW encodes the address count minus one */

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy the extra addresses: each case deliberately falls
		 * through so all lower-numbered addresses are copied too. */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return ntohs(c.viid_pkd);
}
2577
2578/**
2579 * t4_free_vi - free a virtual interface
2580 * @adap: the adapter
2581 * @mbox: mailbox to use for the FW command
2582 * @pf: the PF owning the VI
2583 * @vf: the VF owning the VI
2584 * @viid: virtual interface identifiler
2585 *
2586 * Free a previously allocated virtual interface.
2587 */
2588int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2589 unsigned int vf, unsigned int viid)
2590{
2591 struct fw_vi_cmd c;
2592
2593 memset(&c, 0, sizeof(c));
2594 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2595 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2596 FW_VI_CMD_VFN(vf));
2597 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2598 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2599 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2600}
2601
2602/**
2603 * t4_set_rxmode - set Rx properties of a virtual interface
2604 * @adap: the adapter
2605 * @mbox: mailbox to use for the FW command
2606 * @viid: the VI id
2607 * @mtu: the new MTU or -1
2608 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2609 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2610 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2611 * @sleep_ok: if true we may sleep while awaiting command completion
2612 *
2613 * Sets Rx properties of a virtual interface.
2614 */
2615int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2616 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
2617{
2618 struct fw_vi_rxmode_cmd c;
2619
2620 /* convert to FW values */
2621 if (mtu < 0)
2622 mtu = FW_RXMODE_MTU_NO_CHG;
2623 if (promisc < 0)
2624 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2625 if (all_multi < 0)
2626 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2627 if (bcast < 0)
2628 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2629
2630 memset(&c, 0, sizeof(c));
2631 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2632 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2633 c.retval_len16 = htonl(FW_LEN16(c));
2634 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2635 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2636 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2637 FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
2638 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2639}
2640
2641/**
2642 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2643 * @adap: the adapter
2644 * @mbox: mailbox to use for the FW command
2645 * @viid: the VI id
2646 * @free: if true any existing filters for this VI id are first removed
2647 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2648 * @addr: the MAC address(es)
2649 * @idx: where to store the index of each allocated filter
2650 * @hash: pointer to hash address filter bitmap
2651 * @sleep_ok: call is allowed to sleep
2652 *
2653 * Allocates an exact-match filter for each of the supplied addresses and
2654 * sets it to the corresponding address. If @idx is not %NULL it should
2655 * have at least @naddr entries, each of which will be set to the index of
2656 * the filter allocated for the corresponding MAC address. If a filter
2657 * could not be allocated for an address its index is set to 0xffff.
2658 * If @hash is not %NULL addresses that fail to allocate an exact filter
2659 * are hashed and update the hash filter bitmap pointed at by @hash.
2660 *
2661 * Returns a negative error number or the number of filters allocated.
2662 */
2663int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2664 unsigned int viid, bool free, unsigned int naddr,
2665 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2666{
2667 int i, ret;
2668 struct fw_vi_mac_cmd c;
2669 struct fw_vi_mac_exact *p;
2670
2671 if (naddr > 7)
2672 return -EINVAL;
2673
2674 memset(&c, 0, sizeof(c));
2675 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2676 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2677 FW_VI_MAC_CMD_VIID(viid));
2678 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2679 FW_CMD_LEN16((naddr + 2) / 2));
2680
2681 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2682 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2683 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2684 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2685 }
2686
2687 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2688 if (ret)
2689 return ret;
2690
2691 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2692 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2693
2694 if (idx)
2695 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2696 if (index < NEXACT_MAC)
2697 ret++;
2698 else if (hash)
2699 *hash |= (1 << hash_mac_addr(addr[i]));
2700 }
2701 return ret;
2702}
2703
2704/**
2705 * t4_change_mac - modifies the exact-match filter for a MAC address
2706 * @adap: the adapter
2707 * @mbox: mailbox to use for the FW command
2708 * @viid: the VI id
2709 * @idx: index of existing filter for old value of MAC address, or -1
2710 * @addr: the new MAC address value
2711 * @persist: whether a new MAC allocation should be persistent
2712 * @add_smt: if true also add the address to the HW SMT
2713 *
2714 * Modifies an exact-match filter and sets it to the new MAC address.
2715 * Note that in general it is not possible to modify the value of a given
2716 * filter so the generic way to modify an address filter is to free the one
2717 * being used by the old address value and allocate a new filter for the
2718 * new address value. @idx can be -1 if the address is a new addition.
2719 *
2720 * Returns a negative error number or the index of the filter with the new
2721 * MAC value.
2722 */
2723int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2724 int idx, const u8 *addr, bool persist, bool add_smt)
2725{
2726 int ret, mode;
2727 struct fw_vi_mac_cmd c;
2728 struct fw_vi_mac_exact *p = c.u.exact;
2729
2730 if (idx < 0) /* new allocation */
2731 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2732 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2733
2734 memset(&c, 0, sizeof(c));
2735 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2736 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2737 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2738 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2739 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2740 FW_VI_MAC_CMD_IDX(idx));
2741 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2742
2743 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2744 if (ret == 0) {
2745 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2746 if (ret >= NEXACT_MAC)
2747 ret = -ENOMEM;
2748 }
2749 return ret;
2750}
2751
/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	/* NOTE(review): this is a FW_VI_MAC_CMD but the VIID is packed with
	 * FW_VI_ENABLE_CMD_VIID; presumably both macros encode the field
	 * identically -- verify against the FW API header. */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
2777
2778/**
2779 * t4_enable_vi - enable/disable a virtual interface
2780 * @adap: the adapter
2781 * @mbox: mailbox to use for the FW command
2782 * @viid: the VI id
2783 * @rx_en: 1=enable Rx, 0=disable Rx
2784 * @tx_en: 1=enable Tx, 0=disable Tx
2785 *
2786 * Enables/disables a virtual interface.
2787 */
2788int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2789 bool rx_en, bool tx_en)
2790{
2791 struct fw_vi_enable_cmd c;
2792
2793 memset(&c, 0, sizeof(c));
2794 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2795 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2796 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2797 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2798 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2799}
2800
2801/**
2802 * t4_identify_port - identify a VI's port by blinking its LED
2803 * @adap: the adapter
2804 * @mbox: mailbox to use for the FW command
2805 * @viid: the VI id
2806 * @nblinks: how many times to blink LED at 2.5 Hz
2807 *
2808 * Identifies a VI's port by blinking its LED.
2809 */
2810int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2811 unsigned int nblinks)
2812{
2813 struct fw_vi_enable_cmd c;
2814
2815 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2816 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2817 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2818 c.blinkdur = htons(nblinks);
2819 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2820}
2821
2822/**
2823 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2824 * @adap: the adapter
2825 * @mbox: mailbox to use for the FW command
2826 * @start: %true to enable the queues, %false to disable them
2827 * @pf: the PF owning the queues
2828 * @vf: the VF owning the queues
2829 * @iqid: ingress queue id
2830 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2831 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2832 *
2833 * Starts or stops an ingress queue and its associated FLs, if any.
2834 */
2835int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2836 unsigned int pf, unsigned int vf, unsigned int iqid,
2837 unsigned int fl0id, unsigned int fl1id)
2838{
2839 struct fw_iq_cmd c;
2840
2841 memset(&c, 0, sizeof(c));
2842 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2843 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2844 FW_IQ_CMD_VFN(vf));
2845 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2846 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2847 c.iqid = htons(iqid);
2848 c.fl0id = htons(fl0id);
2849 c.fl1id = htons(fl1id);
2850 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2851}
2852
2853/**
2854 * t4_iq_free - free an ingress queue and its FLs
2855 * @adap: the adapter
2856 * @mbox: mailbox to use for the FW command
2857 * @pf: the PF owning the queues
2858 * @vf: the VF owning the queues
2859 * @iqtype: the ingress queue type
2860 * @iqid: ingress queue id
2861 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2862 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2863 *
2864 * Frees an ingress queue and its associated FLs, if any.
2865 */
2866int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2867 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2868 unsigned int fl0id, unsigned int fl1id)
2869{
2870 struct fw_iq_cmd c;
2871
2872 memset(&c, 0, sizeof(c));
2873 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2874 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2875 FW_IQ_CMD_VFN(vf));
2876 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2877 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2878 c.iqid = htons(iqid);
2879 c.fl0id = htons(fl0id);
2880 c.fl1id = htons(fl1id);
2881 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2882}
2883
2884/**
2885 * t4_eth_eq_free - free an Ethernet egress queue
2886 * @adap: the adapter
2887 * @mbox: mailbox to use for the FW command
2888 * @pf: the PF owning the queue
2889 * @vf: the VF owning the queue
2890 * @eqid: egress queue id
2891 *
2892 * Frees an Ethernet egress queue.
2893 */
2894int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2895 unsigned int vf, unsigned int eqid)
2896{
2897 struct fw_eq_eth_cmd c;
2898
2899 memset(&c, 0, sizeof(c));
2900 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2901 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2902 FW_EQ_ETH_CMD_VFN(vf));
2903 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2904 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2905 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2906}
2907
2908/**
2909 * t4_ctrl_eq_free - free a control egress queue
2910 * @adap: the adapter
2911 * @mbox: mailbox to use for the FW command
2912 * @pf: the PF owning the queue
2913 * @vf: the VF owning the queue
2914 * @eqid: egress queue id
2915 *
2916 * Frees a control egress queue.
2917 */
2918int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2919 unsigned int vf, unsigned int eqid)
2920{
2921 struct fw_eq_ctrl_cmd c;
2922
2923 memset(&c, 0, sizeof(c));
2924 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2925 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2926 FW_EQ_CTRL_CMD_VFN(vf));
2927 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2928 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2929 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2930}
2931
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2955
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.  Messages
 * other than %FW_PORT_CMD are silently ignored.  Always returns 0.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;	/* opcode is the first byte */

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* Map the HW channel in the message to a driver port index. */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		/* Decode pause and speed capabilities from the status word. */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;

		/* Notify the OS layer only when something actually changed. */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
3003
3004static void __devinit get_pci_mode(struct adapter *adapter,
3005 struct pci_params *p)
3006{
3007 u16 val;
3008 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3009
3010 if (pcie_cap) {
3011 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3012 &val);
3013 p->speed = val & PCI_EXP_LNKSTA_CLS;
3014 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3015 }
3016}
3017
3018/**
3019 * init_link_config - initialize a link's SW state
3020 * @lc: structure holding the link state
3021 * @caps: link capabilities
3022 *
3023 * Initializes the SW state maintained for each link, including the link's
3024 * capabilities and default speed/flow-control/autonegotiation settings.
3025 */
3026static void __devinit init_link_config(struct link_config *lc,
3027 unsigned int caps)
3028{
3029 lc->supported = caps;
3030 lc->requested_speed = 0;
3031 lc->speed = 0;
3032 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3033 if (lc->supported & FW_PORT_CAP_ANEG) {
3034 lc->advertising = lc->supported & ADVERT_MASK;
3035 lc->autoneg = AUTONEG_ENABLE;
3036 lc->requested_fc |= PAUSE_AUTONEG;
3037 } else {
3038 lc->advertising = 0;
3039 lc->autoneg = AUTONEG_DISABLE;
3040 }
3041}
3042
3043static int __devinit wait_dev_ready(struct adapter *adap)
3044{
3045 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3046 return 0;
3047 msleep(500);
3048 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3049}
3050
/**
 * t4_prep_adapter - prepare SW state for operation
 * @adapter: the adapter
 *
 * Waits for the device to become ready, records the PCIe link mode and
 * chip revision, reads the VPD parameters, and initializes the
 * congestion control tables.  Returns 0 on success or a negative error
 * code.
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;

	ret = wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);

	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	return 0;
}
3084
/**
 * t4_port_init - initialize SW state for all ports
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF that will own the per-port VIs
 * @vf: the VF that will own the per-port VIs
 *
 * Queries FW for information about each physical port, allocates one VI
 * per port, and records MAC addresses, MDIO address, port/module type,
 * RSS slice size, and link configuration in each port's port_info.
 * Returns 0 on success or a negative error code.
 */
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		/* advance j to the next channel present in portvec */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		/* t4_alloc_vi returns the VI id (>= 0) or a negative error */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);

		/* decode the port-info reply saved in c by the FW read above */
		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
new file mode 100644
index 000000000000..025623285c93
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -0,0 +1,100 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
#ifndef __T4_HW_H
#define __T4_HW_H

#include <linux/types.h>

/* Fixed hardware dimensions of the T4. */
enum {
	NCHAN = 4,            /* # of HW channels */
	MAX_MTU = 9600,       /* max MAC MTU, excluding header + FCS */
	EEPROMSIZE = 17408,   /* Serial EEPROM physical size */
	EEPROMVSIZE = 32768,  /* Serial EEPROM virtual address space size */
	RSS_NENTRIES = 2048,  /* # of entries in RSS mapping table */
	TCB_SIZE = 128,       /* TCB size */
	NMTUS = 16,           /* size of MTU table */
	NCCTRL_WIN = 32,      /* # of congestion control windows */
	NEXACT_MAC = 336,     /* # of exact MAC address filters */
	L2T_SIZE = 4096,      /* # of L2T entries */
	MBOX_LEN = 64,        /* mailbox size in bytes */
	TRACE_LEN = 112,      /* length of trace data and mask */
	FILTER_OPT_LEN = 36,  /* filter tuple width for optional components */
	NWOL_PAT = 8,         /* # of WoL patterns */
	WOL_PAT_LEN = 128,    /* length of WoL patterns */
};

/* Serial flash geometry. */
enum {
	SF_PAGE_SIZE = 256,           /* serial flash page size */
	SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 16,   /* serial flash size */
};

enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */

enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */

enum {
	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
};

struct sge_qstat {                /* data written to SGE queue status entries */
	__be32 qid;
	__be16 cidx;
	__be16 pidx;
};

/*
 * Structure for last 128 bits of response descriptors
 */
struct rsp_ctrl {
	__be32 hdrbuflen_pidx;
	__be32 pldbuflen_qid;
	union {
		u8 type_gen;
		__be64 last_flit;
	};
};

/* NOTE(review): field encodings below are asserted by their names only;
 * presumably NEWBUF is the top bit of pldbuflen_qid with LEN in the rest,
 * and GEN/TYPE are packed into type_gen -- confirm against the data sheet. */
#define RSPD_NEWBUF 0x80000000U
#define RSPD_LEN    0x7fffffffU

#define RSPD_GEN(x)  ((x) >> 7)
#define RSPD_TYPE(x) (((x) >> 4) & 3)

#define QINTR_CNT_EN       0x1
#define QINTR_TIMER_IDX(x) ((x) << 1)
#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
new file mode 100644
index 000000000000..fdb117443144
--- /dev/null
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -0,0 +1,664 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_MSG_H
36#define __T4_MSG_H
37
38#include <linux/types.h>
39
/*
 * CPL message opcodes.  These values are fixed by the T4 hardware/firmware
 * interface and must not be renumbered.
 */
enum {
	CPL_PASS_OPEN_REQ = 0x1,
	CPL_PASS_ACCEPT_RPL = 0x2,
	CPL_ACT_OPEN_REQ = 0x3,
	CPL_SET_TCB_FIELD = 0x5,
	CPL_GET_TCB = 0x6,
	CPL_CLOSE_CON_REQ = 0x8,
	CPL_CLOSE_LISTSRV_REQ = 0x9,
	CPL_ABORT_REQ = 0xA,
	CPL_ABORT_RPL = 0xB,
	CPL_RX_DATA_ACK = 0xD,
	CPL_TX_PKT = 0xE,
	CPL_L2T_WRITE_REQ = 0x12,
	CPL_TID_RELEASE = 0x1A,

	CPL_CLOSE_LISTSRV_RPL = 0x20,
	CPL_L2T_WRITE_RPL = 0x23,
	CPL_PASS_OPEN_RPL = 0x24,
	CPL_ACT_OPEN_RPL = 0x25,
	CPL_PEER_CLOSE = 0x26,
	CPL_ABORT_REQ_RSS = 0x2B,
	CPL_ABORT_RPL_RSS = 0x2D,

	CPL_CLOSE_CON_RPL = 0x32,
	CPL_ISCSI_HDR = 0x33,
	CPL_RDMA_CQE = 0x35,
	CPL_RDMA_CQE_READ_RSP = 0x36,
	CPL_RDMA_CQE_ERR = 0x37,
	CPL_RX_DATA = 0x39,
	CPL_SET_TCB_RPL = 0x3A,
	CPL_RX_PKT = 0x3B,
	CPL_RX_DDP_COMPLETE = 0x3F,

	CPL_ACT_ESTABLISH = 0x40,
	CPL_PASS_ESTABLISH = 0x41,
	CPL_RX_DATA_DDP = 0x42,
	CPL_PASS_ACCEPT_REQ = 0x44,

	CPL_RDMA_READ_REQ = 0x60,

	CPL_PASS_OPEN_REQ6 = 0x81,
	CPL_ACT_OPEN_REQ6 = 0x83,

	CPL_RDMA_TERMINATE = 0xA2,
	CPL_RDMA_WRITE = 0xA4,
	CPL_SGE_EGR_UPDATE = 0xA5,

	CPL_TRACE_PKT = 0xB0,

	CPL_FW4_MSG = 0xC0,
	CPL_FW4_PLD = 0xC1,
	CPL_FW4_ACK = 0xC3,

	CPL_FW6_MSG = 0xE0,
	CPL_FW6_PLD = 0xE1,
	CPL_TX_PKT_LSO = 0xED,
	CPL_TX_PKT_XT = 0xEE,

	NUM_CPL_CMDS	/* 0xEF: one more than the largest opcode above */
};
100
/* Status codes carried in CPL reply messages (values fixed by firmware). */
enum CPL_error {
	CPL_ERR_NONE = 0,
	CPL_ERR_TCAM_FULL = 3,
	CPL_ERR_BAD_LENGTH = 15,
	CPL_ERR_BAD_ROUTE = 18,
	CPL_ERR_CONN_RESET = 20,
	CPL_ERR_CONN_EXIST_SYNRECV = 21,
	CPL_ERR_CONN_EXIST = 22,
	CPL_ERR_ARP_MISS = 23,
	CPL_ERR_BAD_SYN = 24,
	CPL_ERR_CONN_TIMEDOUT = 30,
	CPL_ERR_XMIT_TIMEDOUT = 31,
	CPL_ERR_PERSIST_TIMEDOUT = 32,
	CPL_ERR_FINWAIT2_TIMEDOUT = 33,
	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
	CPL_ERR_RTX_NEG_ADVICE = 35,
	CPL_ERR_PERSIST_NEG_ADVICE = 36,
	CPL_ERR_ABORT_FAILED = 42,
	CPL_ERR_IWARP_FLM = 50,
};
121
/* Upper-layer-protocol offload modes (encoded via the ULP_MODE() macro). */
enum {
	ULP_MODE_NONE  = 0,	/* no upper-layer offload */
	ULP_MODE_ISCSI = 2,	/* iSCSI */
	ULP_MODE_RDMA  = 4,	/* RDMA */
	ULP_MODE_FCOE  = 6,	/* FCoE */
};
128
/* ULP CRC coverage flags (bit mask, may be OR-ed together). */
enum {
	ULP_CRC_HEADER = 0x1,	/* CRC covers the header */
	ULP_CRC_DATA   = 0x2	/* CRC covers the payload */
};
133
/* Commands for the cpl_abort_req.cmd field. */
enum {
	CPL_ABORT_SEND_RST = 0,	/* abort and send RST */
	CPL_ABORT_NO_RST   = 1,	/* abort without sending RST */
};
138
/*
 * Checksum types selectable in TX_PKT_XT work requests
 * (fed into TXPKT_CSUM_TYPE()).  Values are fixed by hardware.
 */
enum {
	TX_CSUM_TCP    = 0,
	TX_CSUM_UDP    = 1,
	TX_CSUM_CRC16  = 4,
	TX_CSUM_CRC32  = 5,
	TX_CSUM_CRC32C = 6,
	TX_CSUM_FCOE   = 7,
	TX_CSUM_TCPIP  = 8,
	TX_CSUM_UDPIP  = 9,
	TX_CSUM_TCPIP6 = 10,
	TX_CSUM_UDPIP6 = 11,
	TX_CSUM_IP     = 12,
};
152
/*
 * First word of most CPL messages: the opcode in the most-significant byte
 * and a 24-bit tag (TID) in the remaining bits.
 */
union opcode_tid {
	__be32 opcode_tid;	/* full big-endian opcode+TID word */
	u8 opcode;		/* aliases the first (opcode) byte */
};

#define CPL_OPCODE(x) ((x) << 24)			/* opcode into top byte */
#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)	/* low 24 bits */

/* partitioning of TID fields that also carry a queue id */
#define GET_TID_TID(x) ((x) & 0x3fff)		/* TID: bits 0..13 */
#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)	/* queue id: bits 14..23 */
#define TID_QID(x)     ((x) << 14)
167
/*
 * RSS header prepended to ingress CPL messages.  The bitfield block is
 * declared in both byte orders so the in-memory layout matches the wire
 * format on either endianness; field order must not be changed.
 */
struct rss_header {
	u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 channel:2;
	u8 filter_hit:1;
	u8 filter_tid:1;
	u8 hash_type:2;
	u8 ipv6:1;
	u8 send2fw:1;
#else
	u8 send2fw:1;
	u8 ipv6:1;
	u8 hash_type:2;
	u8 filter_tid:1;
	u8 filter_hit:1;
	u8 channel:2;
#endif
	__be16 qid;		/* ingress queue the message arrived on */
	__be32 hash_val;	/* RSS hash value */
};
188
/* Common 16-byte header that starts every firmware work request. */
struct work_request_hdr {
	__be32 wr_hi;
	__be32 wr_mid;
	__be64 wr_lo;
};

/* Shorthand used as the first member of WR-carrying CPL structs below. */
#define WR_HDR struct work_request_hdr wr
196
/* Request to open a passive (listening) IPv4 connection. */
struct cpl_pass_open_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be64 opt0;		/* built from the TX_CHAN()..MSS_IDX() macros */
#define TX_CHAN(x)    ((x) << 2)
#define DELACK(x)     ((x) << 5)
#define ULP_MODE(x)   ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
#define DSCP(x)       ((x) << 22)
#define SMAC_SEL(x)   ((u64)(x) << 28)
#define L2T_IDX(x)    ((u64)(x) << 36)
#define NAGLE(x)      ((u64)(x) << 49)
#define WND_SCALE(x)  ((u64)(x) << 50)
#define KEEP_ALIVE(x) ((u64)(x) << 54)
#define MSS_IDX(x)    ((u64)(x) << 60)
	__be64 opt1;		/* built from the SYN_RSS_*/CONN_POLICY macros */
#define SYN_RSS_ENABLE   (1 << 0)
#define SYN_RSS_QUEUE(x) ((x) << 2)
#define CONN_POLICY_ASK  (1 << 22)
};

/* IPv6 variant of cpl_pass_open_req; opt0/opt1 use the same macros. */
struct cpl_pass_open_req6 {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be64 local_ip_hi;
	__be64 local_ip_lo;
	__be64 peer_ip_hi;
	__be64 peer_ip_lo;
	__be64 opt0;
	__be64 opt1;
};
234
/* Reply to a passive-open request; status uses the CPL_ERR_* codes. */
struct cpl_pass_open_rpl {
	union opcode_tid ot;
	u8 rsvd[3];
	u8 status;
};

/* Reply accepting (or configuring) a passively-opened connection. */
struct cpl_pass_accept_rpl {
	WR_HDR;
	union opcode_tid ot;
	__be32 opt2;		/* built from the RSS_QUEUE()..SACK_EN() macros */
#define RSS_QUEUE(x)         ((x) << 0)
#define RSS_QUEUE_VALID      (1 << 10)
#define RX_COALESCE_VALID(x) ((x) << 11)
#define RX_COALESCE(x)       ((x) << 12)
#define TX_QUEUE(x)          ((x) << 23)
#define RX_CHANNEL(x)        ((x) << 26)
#define WND_SCALE_EN(x)      ((x) << 28)
#define TSTAMPS_EN(x)        ((x) << 29)
#define SACK_EN(x)           ((x) << 30)
	__be64 opt0;		/* same layout as cpl_pass_open_req.opt0 */
};
256
/* Request to open an active IPv4 connection. */
struct cpl_act_open_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be32 local_ip;
	__be32 peer_ip;
	__be64 opt0;		/* same layout as cpl_pass_open_req.opt0 */
	__be32 params;
	__be32 opt2;		/* same layout as cpl_pass_accept_rpl.opt2 */
};

/* IPv6 variant of cpl_act_open_req. */
struct cpl_act_open_req6 {
	WR_HDR;
	union opcode_tid ot;
	__be16 local_port;
	__be16 peer_port;
	__be64 local_ip_hi;
	__be64 local_ip_lo;
	__be64 peer_ip_hi;
	__be64 peer_ip_lo;
	__be64 opt0;
	__be32 params;
	__be32 opt2;
};

/* Reply to an active-open request; atid_status packs status and ATID. */
struct cpl_act_open_rpl {
	union opcode_tid ot;
	__be32 atid_status;
#define GET_AOPEN_STATUS(x) ((x) & 0xff)
#define GET_AOPEN_ATID(x)   (((x) >> 8) & 0xffffff)
};
289
/* Notification that a passively-opened connection is established. */
struct cpl_pass_establish {
	union opcode_tid ot;
	__be32 rsvd;
	__be32 tos_stid;	/* TOS in the top byte, server TID below */
#define GET_POPEN_TID(x) ((x) & 0xffffff)
#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
	__be16 mac_idx;
	__be16 tcp_opt;		/* negotiated TCP options, see GET_TCPOPT_* */
#define GET_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
#define GET_TCPOPT_SACK(x)       (((x) >> 6) & 1)
#define GET_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
#define GET_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
	__be32 snd_isn;		/* initial send sequence number */
	__be32 rcv_isn;		/* initial receive sequence number */
};

/* Notification that an actively-opened connection is established. */
struct cpl_act_establish {
	union opcode_tid ot;
	__be32 rsvd;
	__be32 tos_atid;	/* TOS plus ATID; tcp_opt as in pass_establish */
	__be16 mac_idx;
	__be16 tcp_opt;
	__be32 snd_isn;
	__be32 rcv_isn;
};
316
/* Request to read a connection's TCB (TCP control block). */
struct cpl_get_tcb {
	WR_HDR;
	union opcode_tid ot;
	__be16 reply_ctrl;	/* where/whether to deliver the reply */
#define QUEUENO(x)    ((x) << 0)
#define REPLY_CHAN(x) ((x) << 14)
#define NO_REPLY(x)   ((x) << 15)
	__be16 cookie;		/* opaque value echoed back in the reply */
};

/* Request to modify selected bits of one TCB word. */
struct cpl_set_tcb_field {
	WR_HDR;
	union opcode_tid ot;
	__be16 reply_ctrl;	/* same encoding as cpl_get_tcb.reply_ctrl */
	__be16 word_cookie;	/* target word index plus reply cookie */
#define TCB_WORD(x)   ((x) << 0)
#define TCB_COOKIE(x) ((x) << 5)
	__be64 mask;		/* bits to modify within the word */
	__be64 val;		/* new values for the masked bits */
};

/* Reply to cpl_set_tcb_field; carries the word's previous value. */
struct cpl_set_tcb_rpl {
	union opcode_tid ot;
	__be16 rsvd;
	u8 cookie;		/* echoed from the request */
	u8 status;
	__be64 oldval;
};
345
/* Request to close a connection. */
struct cpl_close_con_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd;
};

/* Reply to a connection close; reports final sequence numbers. */
struct cpl_close_con_rpl {
	union opcode_tid ot;
	u8 rsvd[3];
	u8 status;
	__be32 snd_nxt;
	__be32 rcv_nxt;
};

/* Request to close a listening server. */
struct cpl_close_listsvr_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 reply_ctrl;	/* LISTSVR_IPV6 flags an IPv6 listener */
#define LISTSVR_IPV6 (1 << 14)
	__be16 rsvd;
};

/* Reply to a listening-server close. */
struct cpl_close_listsvr_rpl {
	union opcode_tid ot;
	u8 rsvd[3];
	u8 status;
};
373
/* Peer-initiated abort notification delivered through RSS. */
struct cpl_abort_req_rss {
	union opcode_tid ot;
	u8 rsvd[3];
	u8 status;
};

/* Host request to abort a connection; cmd is CPL_ABORT_SEND_RST/NO_RST. */
struct cpl_abort_req {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd0;
	u8 rsvd1;
	u8 cmd;
	u8 rsvd2[6];
};

/* Abort reply delivered through RSS. */
struct cpl_abort_rpl_rss {
	union opcode_tid ot;
	u8 rsvd[3];
	u8 status;
};

/* Host reply to a peer abort; same shape as cpl_abort_req. */
struct cpl_abort_rpl {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd0;
	u8 rsvd1;
	u8 cmd;
	u8 rsvd2[6];
};

/* Notification that the peer closed its half of the connection. */
struct cpl_peer_close {
	union opcode_tid ot;
	__be32 rcv_nxt;
};

/* Request to release a TID back to the free pool. */
struct cpl_tid_release {
	WR_HDR;
	union opcode_tid ot;
	__be32 rsvd;
};
414
/* Core of a TX packet work request; ctrl0/ctrl1 use the TXPKT_* macros. */
struct cpl_tx_pkt_core {
	__be32 ctrl0;
#define TXPKT_VF(x)        ((x) << 0)
#define TXPKT_PF(x)        ((x) << 8)
#define TXPKT_VF_VLD       (1 << 11)
#define TXPKT_OVLAN_IDX(x) ((x) << 12)
#define TXPKT_INTF(x)      ((x) << 16)
#define TXPKT_INS_OVLAN    (1 << 21)
#define TXPKT_OPCODE(x)    ((x) << 24)
	__be16 pack;
	__be16 len;
	__be64 ctrl1;		/* checksum offload and VLAN insertion control */
#define TXPKT_CSUM_END(x)   ((x) << 12)
#define TXPKT_CSUM_START(x) ((x) << 20)
#define TXPKT_IPHDR_LEN(x)  ((u64)(x) << 20)
#define TXPKT_CSUM_LOC(x)   ((u64)(x) << 30)
#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
#define TXPKT_CSUM_TYPE(x)  ((u64)(x) << 40)	/* one of the TX_CSUM_* values */
#define TXPKT_VLAN(x)       ((u64)(x) << 44)
#define TXPKT_VLAN_VLD      (1ULL << 60)
#define TXPKT_IPCSUM_DIS    (1ULL << 62)
#define TXPKT_L4CSUM_DIS    (1ULL << 63)
};

/* Complete TX packet work request: WR header followed by the core. */
struct cpl_tx_pkt {
	WR_HDR;
	struct cpl_tx_pkt_core c;
};

/* TX_PKT_XT messages use the same layout as TX_PKT. */
#define cpl_tx_pkt_xt cpl_tx_pkt

/* LSO (large send offload) control block preceding an encapsulated CPL. */
struct cpl_tx_pkt_lso {
	WR_HDR;
	__be32 lso_ctrl;
#define LSO_TCPHDR_LEN(x) ((x) << 0)
#define LSO_IPHDR_LEN(x)  ((x) << 4)
#define LSO_ETHHDR_LEN(x) ((x) << 16)
#define LSO_IPV6(x)       ((x) << 20)
#define LSO_LAST_SLICE    (1 << 22)
#define LSO_FIRST_SLICE   (1 << 23)
#define LSO_OPCODE(x)     ((x) << 24)
	__be16 ipid_ofst;
	__be16 mss;
	__be32 seqno_offset;
	__be32 len;
	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
};
462
/* Ingress iSCSI PDU header notification. */
struct cpl_iscsi_hdr {
	union opcode_tid ot;
	__be16 pdu_len_ddp;	/* PDU length plus DDP flag */
#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
#define ISCSI_DDP        (1 << 15)
	__be16 len;
	__be32 seq;
	__be16 urg;
	u8 rsvd;
	u8 status;
};

/* Ingress TCP payload notification; bitfields mirror both byte orders. */
struct cpl_rx_data {
	union opcode_tid ot;
	__be16 rsvd;
	__be16 len;
	__be32 seq;
	__be16 urg;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 dack_mode:2;
	u8 psh:1;
	u8 heartbeat:1;
	u8 ddp_off:1;
	u8 :3;
#else
	u8 :3;
	u8 ddp_off:1;
	u8 heartbeat:1;
	u8 psh:1;
	u8 dack_mode:2;
#endif
	u8 status;
};

/* Returns RX credits for a connection and controls delayed ACKs. */
struct cpl_rx_data_ack {
	WR_HDR;
	union opcode_tid ot;
	__be32 credit_dack;
#define RX_CREDITS(x)   ((x) << 0)
#define RX_FORCE_ACK(x) ((x) << 28)
};
504
/* Per-packet ingress descriptor for non-offloaded traffic. */
struct cpl_rx_pkt {
	u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 iff:4;		/* ingress interface */
	u8 csum_calc:1;		/* checksum was computed (see csum field) */
	u8 ipmi_pkt:1;
	u8 vlan_ex:1;		/* VLAN tag was extracted into vlan field */
	u8 ip_frag:1;
#else
	u8 ip_frag:1;
	u8 vlan_ex:1;
	u8 ipmi_pkt:1;
	u8 csum_calc:1;
	u8 iff:4;
#endif
	__be16 csum;
	__be16 vlan;
	__be16 len;
	__be32 l2info;		/* packet classification; see RXF_* flags */
#define RXF_UDP (1 << 22)
#define RXF_TCP (1 << 23)
	__be16 hdr_len;
	__be16 err_vec;
};

/* Descriptor for a packet captured by the hardware tracer. */
struct cpl_trace_pkt {
	u8 opcode;
	u8 intf;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 runt:4;
	u8 filter_hit:4;
	u8 :6;
	u8 err:1;
	u8 trunc:1;
#else
	u8 filter_hit:4;
	u8 runt:4;
	u8 trunc:1;
	u8 err:1;
	u8 :6;
#endif
	__be16 rsvd;
	__be16 len;
	__be64 tstamp;
};
550
/* Request to write one L2T (Layer-2 table) entry. */
struct cpl_l2t_write_req {
	WR_HDR;
	union opcode_tid ot;
	__be16 params;
#define L2T_W_INFO(x)    ((x) << 2)
#define L2T_W_PORT(x)    ((x) << 8)
#define L2T_W_NOREPLY(x) ((x) << 15)
	__be16 l2t_idx;		/* index of the entry to write */
	__be16 vlan;
	u8 dst_mac[6];
};

/* Reply to an L2T write. */
struct cpl_l2t_write_rpl {
	union opcode_tid ot;
	u8 status;
	u8 rsvd[3];
};

/* RDMA connection terminate notification. */
struct cpl_rdma_terminate {
	union opcode_tid ot;
	__be16 rsvd;
	__be16 len;
};

/* Egress-queue update: hardware's view of consumer/producer indices. */
struct cpl_sge_egr_update {
	__be32 opcode_qid;	/* opcode plus egress queue id */
#define EGR_QID(x) ((x) & 0x1FFFF)
	__be16 cidx;
	__be16 pidx;
};
581
/* Firmware payload message, 4-flit flavor. */
struct cpl_fw4_pld {
	u8 opcode;
	u8 rsvd0[3];
	u8 type;
	u8 rsvd1;
	__be16 len;
	__be64 data;
	__be64 rsvd2;
};

/* Firmware payload message, 6-flit flavor. */
struct cpl_fw6_pld {
	u8 opcode;
	u8 rsvd[5];
	__be16 len;
	__be64 data[4];
};

/* Generic firmware-to-host message, 4-flit flavor. */
struct cpl_fw4_msg {
	u8 opcode;
	u8 type;
	__be16 rsvd0;
	__be32 rsvd1;
	__be64 data[2];
};

/* Firmware acknowledgment: returns TX credits and sequence state. */
struct cpl_fw4_ack {
	union opcode_tid ot;
	u8 credits;
	u8 rsvd0[2];
	u8 seq_vld;		/* nonzero when snd_nxt/snd_una are valid */
	__be32 snd_nxt;
	__be32 snd_una;
	__be64 rsvd1;
};

/* Generic firmware-to-host message, 6-flit flavor. */
struct cpl_fw6_msg {
	u8 opcode;
	u8 type;
	__be16 rsvd0;
	__be32 rsvd1;
	__be64 data[4];
};
624
/* ULP TX command codes. */
enum {
	ULP_TX_MEM_READ  = 2,
	ULP_TX_MEM_WRITE = 3,
	ULP_TX_PKT       = 4
};
630
/* ULP TX sub-command codes. */
enum {
	ULP_TX_SC_NOOP = 0x80,	/* no-op */
	ULP_TX_SC_IMM  = 0x81,	/* immediate data */
	ULP_TX_SC_DSGL = 0x82,	/* DMA scatter-gather list */
	ULP_TX_SC_ISGL = 0x83	/* ISGL variant */
};
637
/* One pair of scatter-gather entries as laid out on the wire. */
struct ulptx_sge_pair {
	__be32 len[2];
	__be64 addr[2];
};

/* ULP TX scatter-gather list header followed by a variable tail. */
struct ulptx_sgl {
	__be32 cmd_nsge;	/* command plus number of SG entries */
#define ULPTX_CMD(x)  ((x) << 24)
#define ULPTX_NSGE(x) ((x) << 0)
	__be32 len0;		/* length of the first (inline) entry */
	__be64 addr0;		/* address of the first entry */
	/* NOTE(review): [0] is the pre-C99 variable-length-tail idiom;
	 * a C99 flexible array member (sge[]) would be the modern form. */
	struct ulptx_sge_pair sge[0];
};
651
/* Work request for ULP reads/writes of adapter memory. */
struct ulp_mem_io {
	WR_HDR;
	__be32 cmd;
#define ULP_MEMIO_ORDER(x) ((x) << 23)
	__be32 len16; /* command length */
	__be32 dlen;  /* data length in 32-byte units */
#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
	__be32 lock_addr;	/* target address plus lock flag */
#define ULP_MEMIO_ADDR(x) ((x) << 0)
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
663
664#endif /* __T4_MSG_H */
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
new file mode 100644
index 000000000000..5ed56483cbc2
--- /dev/null
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -0,0 +1,878 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef __T4_REGS_H
36#define __T4_REGS_H
37
/* Register window of the PF (physical function) this driver instance owns. */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))

/* Register window of PF 0; other PFs follow at PF_STRIDE intervals. */
#define PF0_BASE 0x1e000
#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))

#define PF_STRIDE 0x400
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))

/* Register window of the port this driver instance owns. */
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))

/* Register window of port 0; other ports follow at PORT_STRIDE intervals. */
#define PORT0_BASE 0x20000
#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))

#define PORT_STRIDE 0x2000
#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
57
/* Distance between the two EDC register blocks. */
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
/*
 * Address of register 'reg' in EDC instance 'idx' (0 or 1).
 * Arguments are parenthesized so expressions such as EDC_REG(r, i + 1)
 * expand correctly (the original expansion multiplied only 'idx' as
 * written, breaking precedence for compound arguments).
 */
#define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))
60
/* Indexed register helpers: 8-byte-spaced PCIe windows/mailboxes and
 * 4-byte-spaced BIST status registers. */
#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65
66#define SGE_PF_KDOORBELL 0x0
67#define QID_MASK 0xffff8000U
68#define QID_SHIFT 15
69#define QID(x) ((x) << QID_SHIFT)
70#define DBPRIO 0x00004000U
71#define PIDX_MASK 0x00003fffU
72#define PIDX_SHIFT 0
73#define PIDX(x) ((x) << PIDX_SHIFT)
74
75#define SGE_PF_GTS 0x4
76#define INGRESSQID_MASK 0xffff0000U
77#define INGRESSQID_SHIFT 16
78#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT)
79#define TIMERREG_MASK 0x0000e000U
80#define TIMERREG_SHIFT 13
81#define TIMERREG(x) ((x) << TIMERREG_SHIFT)
82#define SEINTARM_MASK 0x00001000U
83#define SEINTARM_SHIFT 12
84#define SEINTARM(x) ((x) << SEINTARM_SHIFT)
85#define CIDXINC_MASK 0x00000fffU
86#define CIDXINC_SHIFT 0
87#define CIDXINC(x) ((x) << CIDXINC_SHIFT)
88
89#define SGE_CONTROL 0x1008
90#define DCASYSTYPE 0x00080000U
91#define RXPKTCPLMODE 0x00040000U
92#define EGRSTATUSPAGESIZE 0x00020000U
93#define PKTSHIFT_MASK 0x00001c00U
94#define PKTSHIFT_SHIFT 10
95#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
96#define INGPCIEBOUNDARY_MASK 0x00000380U
97#define INGPCIEBOUNDARY_SHIFT 7
98#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
99#define INGPADBOUNDARY_MASK 0x00000070U
100#define INGPADBOUNDARY_SHIFT 4
101#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
102#define EGRPCIEBOUNDARY_MASK 0x0000000eU
103#define EGRPCIEBOUNDARY_SHIFT 1
104#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
105#define GLOBALENABLE 0x00000001U
106
107#define SGE_HOST_PAGE_SIZE 0x100c
108#define HOSTPAGESIZEPF0_MASK 0x0000000fU
109#define HOSTPAGESIZEPF0_SHIFT 0
110#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT)
111
112#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
113#define QUEUESPERPAGEPF0_MASK 0x0000000fU
114#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
115
116#define SGE_INT_CAUSE1 0x1024
117#define SGE_INT_CAUSE2 0x1030
118#define SGE_INT_CAUSE3 0x103c
119#define ERR_FLM_DBP 0x80000000U
120#define ERR_FLM_IDMA1 0x40000000U
121#define ERR_FLM_IDMA0 0x20000000U
122#define ERR_FLM_HINT 0x10000000U
123#define ERR_PCIE_ERROR3 0x08000000U
124#define ERR_PCIE_ERROR2 0x04000000U
125#define ERR_PCIE_ERROR1 0x02000000U
126#define ERR_PCIE_ERROR0 0x01000000U
127#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U
128#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U
129#define ERR_INVALID_CIDX_INC 0x00200000U
130#define ERR_ITP_TIME_PAUSED 0x00100000U
131#define ERR_CPL_OPCODE_0 0x00080000U
132#define ERR_DROPPED_DB 0x00040000U
133#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
134#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
135#define ERR_BAD_DB_PIDX3 0x00008000U
136#define ERR_BAD_DB_PIDX2 0x00004000U
137#define ERR_BAD_DB_PIDX1 0x00002000U
138#define ERR_BAD_DB_PIDX0 0x00001000U
139#define ERR_ING_PCIE_CHAN 0x00000800U
140#define ERR_ING_CTXT_PRIO 0x00000400U
141#define ERR_EGR_CTXT_PRIO 0x00000200U
142#define DBFIFO_HP_INT 0x00000100U
143#define DBFIFO_LP_INT 0x00000080U
144#define REG_ADDRESS_ERR 0x00000040U
145#define INGRESS_SIZE_ERR 0x00000020U
146#define EGRESS_SIZE_ERR 0x00000010U
147#define ERR_INV_CTXT3 0x00000008U
148#define ERR_INV_CTXT2 0x00000004U
149#define ERR_INV_CTXT1 0x00000002U
150#define ERR_INV_CTXT0 0x00000001U
151
152#define SGE_INT_ENABLE3 0x1040
153#define SGE_FL_BUFFER_SIZE0 0x1044
154#define SGE_FL_BUFFER_SIZE1 0x1048
155#define SGE_INGRESS_RX_THRESHOLD 0x10a0
156#define THRESHOLD_0_MASK 0x3f000000U
157#define THRESHOLD_0_SHIFT 24
158#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT)
159#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
160#define THRESHOLD_1_MASK 0x003f0000U
161#define THRESHOLD_1_SHIFT 16
162#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT)
163#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
164#define THRESHOLD_2_MASK 0x00003f00U
165#define THRESHOLD_2_SHIFT 8
166#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT)
167#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
168#define THRESHOLD_3_MASK 0x0000003fU
169#define THRESHOLD_3_SHIFT 0
170#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT)
171#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
172
173#define SGE_TIMER_VALUE_0_AND_1 0x10b8
174#define TIMERVALUE0_MASK 0xffff0000U
175#define TIMERVALUE0_SHIFT 16
176#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT)
177#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
178#define TIMERVALUE1_MASK 0x0000ffffU
179#define TIMERVALUE1_SHIFT 0
180#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT)
181#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
182
183#define SGE_TIMER_VALUE_2_AND_3 0x10bc
184#define SGE_TIMER_VALUE_4_AND_5 0x10c0
185#define SGE_DEBUG_INDEX 0x10cc
186#define SGE_DEBUG_DATA_HIGH 0x10d0
187#define SGE_DEBUG_DATA_LOW 0x10d4
188#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
189
190#define PCIE_PF_CLI 0x44
191#define PCIE_INT_CAUSE 0x3004
192#define UNXSPLCPLERR 0x20000000U
193#define PCIEPINT 0x10000000U
194#define PCIESINT 0x08000000U
195#define RPLPERR 0x04000000U
196#define RXWRPERR 0x02000000U
197#define RXCPLPERR 0x01000000U
198#define PIOTAGPERR 0x00800000U
199#define MATAGPERR 0x00400000U
200#define INTXCLRPERR 0x00200000U
201#define FIDPERR 0x00100000U
202#define CFGSNPPERR 0x00080000U
203#define HRSPPERR 0x00040000U
204#define HREQPERR 0x00020000U
205#define HCNTPERR 0x00010000U
206#define DRSPPERR 0x00008000U
207#define DREQPERR 0x00004000U
208#define DCNTPERR 0x00002000U
209#define CRSPPERR 0x00001000U
210#define CREQPERR 0x00000800U
211#define CCNTPERR 0x00000400U
212#define TARTAGPERR 0x00000200U
213#define PIOREQPERR 0x00000100U
214#define PIOCPLPERR 0x00000080U
215#define MSIXDIPERR 0x00000040U
216#define MSIXDATAPERR 0x00000020U
217#define MSIXADDRHPERR 0x00000010U
218#define MSIXADDRLPERR 0x00000008U
219#define MSIDATAPERR 0x00000004U
220#define MSIADDRHPERR 0x00000002U
221#define MSIADDRLPERR 0x00000001U
222
223#define PCIE_NONFAT_ERR 0x3010
224#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
225#define PCIEOFST_MASK 0xfffffc00U
226#define BIR_MASK 0x00000300U
227#define BIR_SHIFT 8
228#define BIR(x) ((x) << BIR_SHIFT)
229#define WINDOW_MASK 0x000000ffU
230#define WINDOW_SHIFT 0
231#define WINDOW(x) ((x) << WINDOW_SHIFT)
232
233#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
234#define RNPP 0x80000000U
235#define RPCP 0x20000000U
236#define RCIP 0x08000000U
237#define RCCP 0x04000000U
238#define RFTP 0x00800000U
239#define PTRP 0x00100000U
240
241#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
242#define TPCP 0x40000000U
243#define TNPP 0x20000000U
244#define TFTP 0x10000000U
245#define TCAP 0x08000000U
246#define TCIP 0x04000000U
247#define RCAP 0x02000000U
248#define PLUP 0x00800000U
249#define PLDN 0x00400000U
250#define OTDD 0x00200000U
251#define GTRP 0x00100000U
252#define RDPE 0x00040000U
253#define TDCE 0x00020000U
254#define TDUE 0x00010000U
255
256#define MC_INT_CAUSE 0x7518
257#define ECC_UE_INT_CAUSE 0x00000004U
258#define ECC_CE_INT_CAUSE 0x00000002U
259#define PERR_INT_CAUSE 0x00000001U
260
261#define MC_ECC_STATUS 0x751c
262#define ECC_CECNT_MASK 0xffff0000U
263#define ECC_CECNT_SHIFT 16
264#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT)
265#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
266#define ECC_UECNT_MASK 0x0000ffffU
267#define ECC_UECNT_SHIFT 0
268#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT)
269#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
270
271#define MC_BIST_CMD 0x7600
272#define START_BIST 0x80000000U
273#define BIST_CMD_GAP_MASK 0x0000ff00U
274#define BIST_CMD_GAP_SHIFT 8
275#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT)
276#define BIST_OPCODE_MASK 0x00000003U
277#define BIST_OPCODE_SHIFT 0
278#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT)
279
280#define MC_BIST_CMD_ADDR 0x7604
281#define MC_BIST_CMD_LEN 0x7608
282#define MC_BIST_DATA_PATTERN 0x760c
283#define BIST_DATA_TYPE_MASK 0x0000000fU
284#define BIST_DATA_TYPE_SHIFT 0
285#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT)
286
287#define MC_BIST_STATUS_RDATA 0x7688
288
289#define MA_EXT_MEMORY_BAR 0x77c8
290#define EXT_MEM_SIZE_MASK 0x00000fffU
291#define EXT_MEM_SIZE_SHIFT 0
292#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
293
294#define MA_TARGET_MEM_ENABLE 0x77d8
295#define EXT_MEM_ENABLE 0x00000004U
296#define EDRAM1_ENABLE 0x00000002U
297#define EDRAM0_ENABLE 0x00000001U
298
299#define MA_INT_CAUSE 0x77e0
300#define MEM_PERR_INT_CAUSE 0x00000002U
301#define MEM_WRAP_INT_CAUSE 0x00000001U
302
303#define MA_INT_WRAP_STATUS 0x77e4
304#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U
305#define MEM_WRAP_ADDRESS_SHIFT 4
306#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
307#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU
308#define MEM_WRAP_CLIENT_NUM_SHIFT 0
309#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
310
311#define MA_PARITY_ERROR_STATUS 0x77f4
312
313#define EDC_0_BASE_ADDR 0x7900
314
315#define EDC_BIST_CMD 0x7904
316#define EDC_BIST_CMD_ADDR 0x7908
317#define EDC_BIST_CMD_LEN 0x790c
318#define EDC_BIST_DATA_PATTERN 0x7910
319#define EDC_BIST_STATUS_RDATA 0x7928
320#define EDC_INT_CAUSE 0x7978
321#define ECC_UE_PAR 0x00000020U
322#define ECC_CE_PAR 0x00000010U
323#define PERR_PAR_CAUSE 0x00000008U
324
325#define EDC_ECC_STATUS 0x797c
326
327#define EDC_1_BASE_ADDR 0x7980
328
329#define CIM_PF_MAILBOX_DATA 0x240
330#define CIM_PF_MAILBOX_CTRL 0x280
331#define MBMSGVALID 0x00000008U
332#define MBINTREQ 0x00000004U
333#define MBOWNER_MASK 0x00000003U
334#define MBOWNER_SHIFT 0
335#define MBOWNER(x) ((x) << MBOWNER_SHIFT)
336#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
337
338#define CIM_PF_HOST_INT_CAUSE 0x28c
339#define MBMSGRDYINT 0x00080000U
340
341#define CIM_HOST_INT_CAUSE 0x7b2c
342#define TIEQOUTPARERRINT 0x00100000U
343#define TIEQINPARERRINT 0x00080000U
344#define MBHOSTPARERR 0x00040000U
345#define MBUPPARERR 0x00020000U
346#define IBQPARERR 0x0001f800U
347#define IBQTP0PARERR 0x00010000U
348#define IBQTP1PARERR 0x00008000U
349#define IBQULPPARERR 0x00004000U
350#define IBQSGELOPARERR 0x00002000U
351#define IBQSGEHIPARERR 0x00001000U
352#define IBQNCSIPARERR 0x00000800U
353#define OBQPARERR 0x000007e0U
354#define OBQULP0PARERR 0x00000400U
355#define OBQULP1PARERR 0x00000200U
356#define OBQULP2PARERR 0x00000100U
357#define OBQULP3PARERR 0x00000080U
358#define OBQSGEPARERR 0x00000040U
359#define OBQNCSIPARERR 0x00000020U
360#define PREFDROPINT 0x00000002U
361#define UPACCNONZERO 0x00000001U
362
363#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
364#define EEPROMWRINT 0x40000000U
365#define TIMEOUTMAINT 0x20000000U
366#define TIMEOUTINT 0x10000000U
367#define RSPOVRLOOKUPINT 0x08000000U
368#define REQOVRLOOKUPINT 0x04000000U
369#define BLKWRPLINT 0x02000000U
370#define BLKRDPLINT 0x01000000U
371#define SGLWRPLINT 0x00800000U
372#define SGLRDPLINT 0x00400000U
373#define BLKWRCTLINT 0x00200000U
374#define BLKRDCTLINT 0x00100000U
375#define SGLWRCTLINT 0x00080000U
376#define SGLRDCTLINT 0x00040000U
377#define BLKWREEPROMINT 0x00020000U
378#define BLKRDEEPROMINT 0x00010000U
379#define SGLWREEPROMINT 0x00008000U
380#define SGLRDEEPROMINT 0x00004000U
381#define BLKWRFLASHINT 0x00002000U
382#define BLKRDFLASHINT 0x00001000U
383#define SGLWRFLASHINT 0x00000800U
384#define SGLRDFLASHINT 0x00000400U
385#define BLKWRBOOTINT 0x00000200U
386#define BLKRDBOOTINT 0x00000100U
387#define SGLWRBOOTINT 0x00000080U
388#define SGLRDBOOTINT 0x00000040U
389#define ILLWRBEINT 0x00000020U
390#define ILLRDBEINT 0x00000010U
391#define ILLRDINT 0x00000008U
392#define ILLWRINT 0x00000004U
393#define ILLTRANSINT 0x00000002U
394#define RSVDSPACEINT 0x00000001U
395
396#define TP_OUT_CONFIG 0x7d04
397#define VLANEXTENABLE_MASK 0x0000f000U
398#define VLANEXTENABLE_SHIFT 12
399
400#define TP_PARA_REG2 0x7d68
401#define MAXRXDATA_MASK 0xffff0000U
402#define MAXRXDATA_SHIFT 16
403#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
404
405#define TP_TIMER_RESOLUTION 0x7d90
406#define TIMERRESOLUTION_MASK 0x00ff0000U
407#define TIMERRESOLUTION_SHIFT 16
408#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
409
410#define TP_SHIFT_CNT 0x7dc0
411
412#define TP_CCTRL_TABLE 0x7ddc
413#define TP_MTU_TABLE 0x7de4
414#define MTUINDEX_MASK 0xff000000U
415#define MTUINDEX_SHIFT 24
416#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT)
417#define MTUWIDTH_MASK 0x000f0000U
418#define MTUWIDTH_SHIFT 16
419#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT)
420#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
421#define MTUVALUE_MASK 0x00003fffU
422#define MTUVALUE_SHIFT 0
423#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT)
424#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
425
426#define TP_RSS_LKP_TABLE 0x7dec
427#define LKPTBLROWVLD 0x80000000U
428#define LKPTBLQUEUE1_MASK 0x000ffc00U
429#define LKPTBLQUEUE1_SHIFT 10
430#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT)
431#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
432#define LKPTBLQUEUE0_MASK 0x000003ffU
433#define LKPTBLQUEUE0_SHIFT 0
434#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT)
435#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
436
437#define TP_PIO_ADDR 0x7e40
438#define TP_PIO_DATA 0x7e44
439#define TP_MIB_INDEX 0x7e50
440#define TP_MIB_DATA 0x7e54
441#define TP_INT_CAUSE 0x7e74
442#define FLMTXFLSTEMPTY 0x40000000U
443
444#define TP_INGRESS_CONFIG 0x141
445#define VNIC 0x00000800U
446#define CSUM_HAS_PSEUDO_HDR 0x00000400U
447#define RM_OVLAN 0x00000200U
448#define LOOKUPEVERYPKT 0x00000100U
449
450#define TP_MIB_MAC_IN_ERR_0 0x0
451#define TP_MIB_TCP_OUT_RST 0xc
452#define TP_MIB_TCP_IN_SEG_HI 0x10
453#define TP_MIB_TCP_IN_SEG_LO 0x11
454#define TP_MIB_TCP_OUT_SEG_HI 0x12
455#define TP_MIB_TCP_OUT_SEG_LO 0x13
456#define TP_MIB_TCP_RXT_SEG_HI 0x14
457#define TP_MIB_TCP_RXT_SEG_LO 0x15
458#define TP_MIB_TNL_CNG_DROP_0 0x18
459#define TP_MIB_TCP_V6IN_ERR_0 0x28
460#define TP_MIB_TCP_V6OUT_RST 0x2c
461#define TP_MIB_OFD_ARP_DROP 0x36
462#define TP_MIB_TNL_DROP_0 0x44
463#define TP_MIB_OFD_VLN_DROP_0 0x58
464
465#define ULP_TX_INT_CAUSE 0x8dcc
466#define PBL_BOUND_ERR_CH3 0x80000000U
467#define PBL_BOUND_ERR_CH2 0x40000000U
468#define PBL_BOUND_ERR_CH1 0x20000000U
469#define PBL_BOUND_ERR_CH0 0x10000000U
470
471#define PM_RX_INT_CAUSE 0x8fdc
472#define ZERO_E_CMD_ERROR 0x00400000U
473#define PMRX_FRAMING_ERROR 0x003ffff0U
474#define OCSPI_PAR_ERROR 0x00000008U
475#define DB_OPTIONS_PAR_ERROR 0x00000004U
476#define IESPI_PAR_ERROR 0x00000002U
477#define E_PCMD_PAR_ERROR 0x00000001U
478
479#define PM_TX_INT_CAUSE 0x8ffc
480#define PCMD_LEN_OVFL0 0x80000000U
481#define PCMD_LEN_OVFL1 0x40000000U
482#define PCMD_LEN_OVFL2 0x20000000U
483#define ZERO_C_CMD_ERROR 0x10000000U
484#define PMTX_FRAMING_ERROR 0x0ffffff0U
485#define OESPI_PAR_ERROR 0x00000008U
486#define ICSPI_PAR_ERROR 0x00000002U
487#define C_PCMD_PAR_ERROR 0x00000001U
488
489#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
490#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
491#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
492#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
493#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
494#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
495#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
496#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
497#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
498#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
499#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
500#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
501#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
502#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
503#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
504#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
505#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
506#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
507#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
508#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
509#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
510#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
511#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
512#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
513#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
514#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
515#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
516#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
517#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
518#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
519#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
520#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
521#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
522#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
523#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
524#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
525#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
526#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
527#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
528#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
529#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
530#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
531#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
532#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
533#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
534#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
535#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
536#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
537#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
538#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
539#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
540#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
541#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
542#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
543#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
544#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
545#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
546#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
547#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
548#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
549#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
550#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
551#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
552#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
553#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
554#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
555#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
556#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
557#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
558#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
559#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
560#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
561#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
562#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
563#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
564#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
565#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
566#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
567#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
568#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
569#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
570#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
571#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
572#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
573#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
574#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
575#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
576#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
577#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
578#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
579#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
580#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
581#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
582#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
583#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
584#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
585#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
586#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
587#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
588#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
589#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
590#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
591#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
592#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
593#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
594#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
595#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
596#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
597#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
598#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
599#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
600#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
601#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
602#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
603#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
604#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
605#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
606#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
607#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
608#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
609#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
610#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
611#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
612#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
613#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
614#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
615#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
616#define MPS_CMN_CTL 0x9000
617#define NUMPORTS_MASK 0x00000003U
618#define NUMPORTS_SHIFT 0
619#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
620
621#define MPS_INT_CAUSE 0x9008
622#define STATINT 0x00000020U
623#define TXINT 0x00000010U
624#define RXINT 0x00000008U
625#define TRCINT 0x00000004U
626#define CLSINT 0x00000002U
627#define PLINT 0x00000001U
628
629#define MPS_TX_INT_CAUSE 0x9408
630#define PORTERR 0x00010000U
631#define FRMERR 0x00008000U
632#define SECNTERR 0x00004000U
633#define BUBBLE 0x00002000U
634#define TXDESCFIFO 0x00001e00U
635#define TXDATAFIFO 0x000001e0U
636#define NCSIFIFO 0x00000010U
637#define TPFIFO 0x0000000fU
638
639#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
640#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
641#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
642
643#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
644#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
645#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
646#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
647#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
648#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
649#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
650#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
651#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
652#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
653#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
654#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
655#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
656#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
657#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
658#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
659#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
660#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
661#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
662#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
663#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
664#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
665#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
666#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
667#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
668#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
669#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
670#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
671#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
672#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
673#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
674#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
675#define MPS_TRC_CFG 0x9800
676#define TRCFIFOEMPTY 0x00000010U
677#define TRCIGNOREDROPINPUT 0x00000008U
678#define TRCKEEPDUPLICATES 0x00000004U
679#define TRCEN 0x00000002U
680#define TRCMULTIFILTER 0x00000001U
681
682#define MPS_TRC_RSS_CONTROL 0x9808
683#define RSSCONTROL_MASK 0x00ff0000U
684#define RSSCONTROL_SHIFT 16
685#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
686#define QUEUENUMBER_MASK 0x0000ffffU
687#define QUEUENUMBER_SHIFT 0
688#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT)
689
690#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
691#define TFINVERTMATCH 0x01000000U
692#define TFPKTTOOLARGE 0x00800000U
693#define TFEN 0x00400000U
694#define TFPORT_MASK 0x003c0000U
695#define TFPORT_SHIFT 18
696#define TFPORT(x) ((x) << TFPORT_SHIFT)
697#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
698#define TFDROP 0x00020000U
699#define TFSOPEOPERR 0x00010000U
700#define TFLENGTH_MASK 0x00001f00U
701#define TFLENGTH_SHIFT 8
702#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT)
703#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
704#define TFOFFSET_MASK 0x0000001fU
705#define TFOFFSET_SHIFT 0
706#define TFOFFSET(x) ((x) << TFOFFSET_SHIFT)
707#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
708
709#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
710#define TFMINPKTSIZE_MASK 0x01ff0000U
711#define TFMINPKTSIZE_SHIFT 16
712#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT)
713#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
714#define TFCAPTUREMAX_MASK 0x00003fffU
715#define TFCAPTUREMAX_SHIFT 0
716#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT)
717#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
718
719#define MPS_TRC_INT_CAUSE 0x985c
720#define MISCPERR 0x00000100U
721#define PKTFIFO 0x000000f0U
722#define FILTMEM 0x0000000fU
723
724#define MPS_TRC_FILTER0_MATCH 0x9c00
725#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
726#define MPS_TRC_FILTER1_MATCH 0x9d00
727#define MPS_CLS_INT_CAUSE 0xd028
728#define PLERRENB 0x00000008U
729#define HASHSRAM 0x00000004U
730#define MATCHTCAM 0x00000002U
731#define MATCHSRAM 0x00000001U
732
733#define MPS_RX_PERR_INT_CAUSE 0x11074
734
735#define CPL_INTR_CAUSE 0x19054
736#define CIM_OP_MAP_PERR 0x00000020U
737#define CIM_OVFL_ERROR 0x00000010U
738#define TP_FRAMING_ERROR 0x00000008U
739#define SGE_FRAMING_ERROR 0x00000004U
740#define CIM_FRAMING_ERROR 0x00000002U
741#define ZERO_SWITCH_ERROR 0x00000001U
742
743#define SMB_INT_CAUSE 0x19090
744#define MSTTXFIFOPARINT 0x00200000U
745#define MSTRXFIFOPARINT 0x00100000U
746#define SLVFIFOPARINT 0x00080000U
747
748#define ULP_RX_INT_CAUSE 0x19158
749#define ULP_RX_ISCSI_TAGMASK 0x19164
750#define ULP_RX_ISCSI_PSZ 0x19168
751#define HPZ3_MASK 0x0f000000U
752#define HPZ3_SHIFT 24
753#define HPZ3(x) ((x) << HPZ3_SHIFT)
754#define HPZ2_MASK 0x000f0000U
755#define HPZ2_SHIFT 16
756#define HPZ2(x) ((x) << HPZ2_SHIFT)
757#define HPZ1_MASK 0x00000f00U
758#define HPZ1_SHIFT 8
759#define HPZ1(x) ((x) << HPZ1_SHIFT)
760#define HPZ0_MASK 0x0000000fU
761#define HPZ0_SHIFT 0
762#define HPZ0(x) ((x) << HPZ0_SHIFT)
763
764#define ULP_RX_TDDP_PSZ 0x19178
765
766#define SF_DATA 0x193f8
767#define SF_OP 0x193fc
768#define BUSY 0x80000000U
769#define SF_LOCK 0x00000010U
770#define SF_CONT 0x00000008U
771#define BYTECNT_MASK 0x00000006U
772#define BYTECNT_SHIFT 1
773#define BYTECNT(x) ((x) << BYTECNT_SHIFT)
774#define OP_WR 0x00000001U
775
776#define PL_PF_INT_CAUSE 0x3c0
777#define PFSW 0x00000008U
778#define PFSGE 0x00000004U
779#define PFCIM 0x00000002U
780#define PFMPS 0x00000001U
781
782#define PL_PF_INT_ENABLE 0x3c4
783#define PL_PF_CTL 0x3c8
784#define SWINT 0x00000001U
785
786#define PL_WHOAMI 0x19400
787#define SOURCEPF_MASK 0x00000700U
788#define SOURCEPF_SHIFT 8
789#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT)
790#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
791#define ISVF 0x00000080U
792#define VFID_MASK 0x0000007fU
793#define VFID_SHIFT 0
794#define VFID(x) ((x) << VFID_SHIFT)
795#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT)
796
797#define PL_INT_CAUSE 0x1940c
798#define ULP_TX 0x08000000U
799#define SGE 0x04000000U
800#define HMA 0x02000000U
801#define CPL_SWITCH 0x01000000U
802#define ULP_RX 0x00800000U
803#define PM_RX 0x00400000U
804#define PM_TX 0x00200000U
805#define MA 0x00100000U
806#define TP 0x00080000U
807#define LE 0x00040000U
808#define EDC1 0x00020000U
809#define EDC0 0x00010000U
810#define MC 0x00008000U
811#define PCIE 0x00004000U
812#define PMU 0x00002000U
813#define XGMAC_KR1 0x00001000U
814#define XGMAC_KR0 0x00000800U
815#define XGMAC1 0x00000400U
816#define XGMAC0 0x00000200U
817#define SMB 0x00000100U
818#define SF 0x00000080U
819#define PL 0x00000040U
820#define NCSI 0x00000020U
821#define MPS 0x00000010U
822#define MI 0x00000008U
823#define DBG 0x00000004U
824#define I2CM 0x00000002U
825#define CIM 0x00000001U
826
827#define PL_INT_MAP0 0x19414
828#define PL_RST 0x19428
829#define PIORST 0x00000002U
830#define PIORSTMODE 0x00000001U
831
832#define PL_PL_INT_CAUSE 0x19430
833#define FATALPERR 0x00000010U
834#define PERRVFID 0x00000001U
835
836#define PL_REV 0x1943c
837
838#define LE_DB_CONFIG 0x19c04
839#define HASHEN 0x00100000U
840
841#define LE_DB_SERVER_INDEX 0x19c18
842#define LE_DB_ACT_CNT_IPV4 0x19c20
843#define LE_DB_ACT_CNT_IPV6 0x19c24
844
845#define LE_DB_INT_CAUSE 0x19c3c
846#define REQQPARERR 0x00010000U
847#define UNKNOWNCMD 0x00008000U
848#define PARITYERR 0x00000040U
849#define LIPMISS 0x00000020U
850#define LIP0 0x00000010U
851
852#define LE_DB_TID_HASHBASE 0x19df8
853
854#define NCSI_INT_CAUSE 0x1a0d8
855#define CIM_DM_PRTY_ERR 0x00000100U
856#define MPS_DM_PRTY_ERR 0x00000080U
857#define TXFIFO_PRTY_ERR 0x00000002U
858#define RXFIFO_PRTY_ERR 0x00000001U
859
860#define XGMAC_PORT_CFG2 0x1018
861#define PATEN 0x00040000U
862#define MAGICEN 0x00020000U
863
864#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
865#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
866
867#define XGMAC_PORT_EPIO_DATA0 0x10c0
868#define XGMAC_PORT_EPIO_DATA1 0x10c4
869#define XGMAC_PORT_EPIO_DATA2 0x10c8
870#define XGMAC_PORT_EPIO_DATA3 0x10cc
871#define XGMAC_PORT_EPIO_OP 0x10d0
872#define EPIOWR 0x00000100U
873#define ADDRESS_MASK 0x000000ffU
874#define ADDRESS_SHIFT 0
875#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
876
877#define XGMAC_PORT_INT_CAUSE 0x10dc
878#endif /* __T4_REGS_H */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 000000000000..3393d05a388a
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1580 @@
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef _T4FW_INTERFACE_H_
36#define _T4FW_INTERFACE_H_
37
38#define FW_T4VF_SGE_BASE_ADDR 0x0000
39#define FW_T4VF_MPS_BASE_ADDR 0x0100
40#define FW_T4VF_PL_BASE_ADDR 0x0200
41#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
42#define FW_T4VF_CIM_BASE_ADDR 0x0300
43
44enum fw_wr_opcodes {
45 FW_FILTER_WR = 0x02,
46 FW_ULPTX_WR = 0x04,
47 FW_TP_WR = 0x05,
48 FW_ETH_TX_PKT_WR = 0x08,
49 FW_FLOWC_WR = 0x0a,
50 FW_OFLD_TX_DATA_WR = 0x0b,
51 FW_CMD_WR = 0x10,
52 FW_ETH_TX_PKT_VM_WR = 0x11,
53 FW_RI_RES_WR = 0x0c,
54 FW_RI_INIT_WR = 0x0d,
55 FW_RI_RDMA_WRITE_WR = 0x14,
56 FW_RI_SEND_WR = 0x15,
57 FW_RI_RDMA_READ_WR = 0x16,
58 FW_RI_RECV_WR = 0x17,
59 FW_RI_BIND_MW_WR = 0x18,
60 FW_RI_FR_NSMR_WR = 0x19,
61 FW_RI_INV_LSTAG_WR = 0x1a,
62 FW_LASTC2E_WR = 0x40
63};
64
65struct fw_wr_hdr {
66 __be32 hi;
67 __be32 lo;
68};
69
70#define FW_WR_OP(x) ((x) << 24)
71#define FW_WR_ATOMIC(x) ((x) << 23)
72#define FW_WR_FLUSH(x) ((x) << 22)
73#define FW_WR_COMPL(x) ((x) << 21)
74#define FW_WR_IMMDLEN(x) ((x) << 0)
75
76#define FW_WR_EQUIQ (1U << 31)
77#define FW_WR_EQUEQ (1U << 30)
78#define FW_WR_FLOWID(x) ((x) << 8)
79#define FW_WR_LEN16(x) ((x) << 0)
80
81struct fw_ulptx_wr {
82 __be32 op_to_compl;
83 __be32 flowid_len16;
84 u64 cookie;
85};
86
87struct fw_tp_wr {
88 __be32 op_to_immdlen;
89 __be32 flowid_len16;
90 u64 cookie;
91};
92
93struct fw_eth_tx_pkt_wr {
94 __be32 op_immdlen;
95 __be32 equiq_to_len16;
96 __be64 r3;
97};
98
99enum fw_flowc_mnem {
100 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
101 FW_FLOWC_MNEM_CH,
102 FW_FLOWC_MNEM_PORT,
103 FW_FLOWC_MNEM_IQID,
104 FW_FLOWC_MNEM_SNDNXT,
105 FW_FLOWC_MNEM_RCVNXT,
106 FW_FLOWC_MNEM_SNDBUF,
107 FW_FLOWC_MNEM_MSS,
108};
109
110struct fw_flowc_mnemval {
111 u8 mnemonic;
112 u8 r4[3];
113 __be32 val;
114};
115
116struct fw_flowc_wr {
117 __be32 op_to_nparams;
118#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0)
119 __be32 flowid_len16;
120 struct fw_flowc_mnemval mnemval[0];
121};
122
123struct fw_ofld_tx_data_wr {
124 __be32 op_to_immdlen;
125 __be32 flowid_len16;
126 __be32 plen;
127 __be32 tunnel_to_proxy;
128#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19)
129#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18)
130#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17)
131#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16)
132#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15)
133#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14)
134#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10)
135#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
136};
137
138struct fw_cmd_wr {
139 __be32 op_dma;
140#define FW_CMD_WR_DMA (1U << 17)
141 __be32 len16_pkd;
142 __be64 cookie_daddr;
143};
144
145struct fw_eth_tx_pkt_vm_wr {
146 __be32 op_immdlen;
147 __be32 equiq_to_len16;
148 __be32 r3[2];
149 u8 ethmacdst[6];
150 u8 ethmacsrc[6];
151 __be16 ethtype;
152 __be16 vlantci;
153};
154
155#define FW_CMD_MAX_TIMEOUT 3000
156
157enum fw_cmd_opcodes {
158 FW_LDST_CMD = 0x01,
159 FW_RESET_CMD = 0x03,
160 FW_HELLO_CMD = 0x04,
161 FW_BYE_CMD = 0x05,
162 FW_INITIALIZE_CMD = 0x06,
163 FW_CAPS_CONFIG_CMD = 0x07,
164 FW_PARAMS_CMD = 0x08,
165 FW_PFVF_CMD = 0x09,
166 FW_IQ_CMD = 0x10,
167 FW_EQ_MNGT_CMD = 0x11,
168 FW_EQ_ETH_CMD = 0x12,
169 FW_EQ_CTRL_CMD = 0x13,
170 FW_EQ_OFLD_CMD = 0x21,
171 FW_VI_CMD = 0x14,
172 FW_VI_MAC_CMD = 0x15,
173 FW_VI_RXMODE_CMD = 0x16,
174 FW_VI_ENABLE_CMD = 0x17,
175 FW_ACL_MAC_CMD = 0x18,
176 FW_ACL_VLAN_CMD = 0x19,
177 FW_VI_STATS_CMD = 0x1a,
178 FW_PORT_CMD = 0x1b,
179 FW_PORT_STATS_CMD = 0x1c,
180 FW_PORT_LB_STATS_CMD = 0x1d,
181 FW_PORT_TRACE_CMD = 0x1e,
182 FW_PORT_TRACE_MMAP_CMD = 0x1f,
183 FW_RSS_IND_TBL_CMD = 0x20,
184 FW_RSS_GLB_CONFIG_CMD = 0x22,
185 FW_RSS_VI_CONFIG_CMD = 0x23,
186 FW_LASTC2E_CMD = 0x40,
187 FW_ERROR_CMD = 0x80,
188 FW_DEBUG_CMD = 0x81,
189};
190
191enum fw_cmd_cap {
192 FW_CMD_CAP_PF = 0x01,
193 FW_CMD_CAP_DMAQ = 0x02,
194 FW_CMD_CAP_PORT = 0x04,
195 FW_CMD_CAP_PORTPROMISC = 0x08,
196 FW_CMD_CAP_PORTSTATS = 0x10,
197 FW_CMD_CAP_VF = 0x80,
198};
199
200/*
201 * Generic command header flit0
202 */
203struct fw_cmd_hdr {
204 __be32 hi;
205 __be32 lo;
206};
207
208#define FW_CMD_OP(x) ((x) << 24)
209#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff)
210#define FW_CMD_REQUEST (1U << 23)
211#define FW_CMD_READ (1U << 22)
212#define FW_CMD_WRITE (1U << 21)
213#define FW_CMD_EXEC (1U << 20)
214#define FW_CMD_RAMASK(x) ((x) << 20)
215#define FW_CMD_RETVAL(x) ((x) << 8)
216#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff)
217#define FW_CMD_LEN16(x) ((x) << 0)
218
219enum fw_ldst_addrspc {
220 FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
221 FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
222 FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
223 FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
224 FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
225 FW_LDST_ADDRSPC_TP_PIO = 0x0010,
226 FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
227 FW_LDST_ADDRSPC_TP_MIB = 0x0012,
228 FW_LDST_ADDRSPC_MDIO = 0x0018,
229 FW_LDST_ADDRSPC_MPS = 0x0020,
230 FW_LDST_ADDRSPC_FUNC = 0x0028
231};
232
233enum fw_ldst_mps_fid {
234 FW_LDST_MPS_ATRB,
235 FW_LDST_MPS_RPLC
236};
237
238enum fw_ldst_func_access_ctl {
239 FW_LDST_FUNC_ACC_CTL_VIID,
240 FW_LDST_FUNC_ACC_CTL_FID
241};
242
243enum fw_ldst_func_mod_index {
244 FW_LDST_FUNC_MPS
245};
246
247struct fw_ldst_cmd {
248 __be32 op_to_addrspace;
249#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
250 __be32 cycles_to_len16;
251 union fw_ldst {
252 struct fw_ldst_addrval {
253 __be32 addr;
254 __be32 val;
255 } addrval;
256 struct fw_ldst_idctxt {
257 __be32 physid;
258 __be32 msg_pkd;
259 __be32 ctxt_data7;
260 __be32 ctxt_data6;
261 __be32 ctxt_data5;
262 __be32 ctxt_data4;
263 __be32 ctxt_data3;
264 __be32 ctxt_data2;
265 __be32 ctxt_data1;
266 __be32 ctxt_data0;
267 } idctxt;
268 struct fw_ldst_mdio {
269 __be16 paddr_mmd;
270 __be16 raddr;
271 __be16 vctl;
272 __be16 rval;
273 } mdio;
274 struct fw_ldst_mps {
275 __be16 fid_ctl;
276 __be16 rplcpf_pkd;
277 __be32 rplc127_96;
278 __be32 rplc95_64;
279 __be32 rplc63_32;
280 __be32 rplc31_0;
281 __be32 atrb;
282 __be16 vlan[16];
283 } mps;
284 struct fw_ldst_func {
285 u8 access_ctl;
286 u8 mod_index;
287 __be16 ctl_id;
288 __be32 offset;
289 __be64 data0;
290 __be64 data1;
291 } func;
292 } u;
293};
294
295#define FW_LDST_CMD_MSG(x) ((x) << 31)
296#define FW_LDST_CMD_PADDR(x) ((x) << 8)
297#define FW_LDST_CMD_MMD(x) ((x) << 0)
298#define FW_LDST_CMD_FID(x) ((x) << 15)
299#define FW_LDST_CMD_CTL(x) ((x) << 0)
300#define FW_LDST_CMD_RPLCPF(x) ((x) << 0)
301
302struct fw_reset_cmd {
303 __be32 op_to_write;
304 __be32 retval_len16;
305 __be32 val;
306 __be32 r3;
307};
308
309struct fw_hello_cmd {
310 __be32 op_to_write;
311 __be32 retval_len16;
312 __be32 err_to_mbasyncnot;
313#define FW_HELLO_CMD_ERR (1U << 31)
314#define FW_HELLO_CMD_INIT (1U << 30)
315#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29)
316#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
317#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24)
318#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20)
319 __be32 fwrev;
320};
321
322struct fw_bye_cmd {
323 __be32 op_to_write;
324 __be32 retval_len16;
325 __be64 r3;
326};
327
328struct fw_initialize_cmd {
329 __be32 op_to_write;
330 __be32 retval_len16;
331 __be64 r3;
332};
333
334enum fw_caps_config_hm {
335 FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
336 FW_CAPS_CONFIG_HM_PL = 0x00000002,
337 FW_CAPS_CONFIG_HM_SGE = 0x00000004,
338 FW_CAPS_CONFIG_HM_CIM = 0x00000008,
339 FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
340 FW_CAPS_CONFIG_HM_TP = 0x00000020,
341 FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
342 FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
343 FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
344 FW_CAPS_CONFIG_HM_MC = 0x00000200,
345 FW_CAPS_CONFIG_HM_LE = 0x00000400,
346 FW_CAPS_CONFIG_HM_MPS = 0x00000800,
347 FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
348 FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
349 FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
350 FW_CAPS_CONFIG_HM_MI = 0x00008000,
351 FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
352 FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
353 FW_CAPS_CONFIG_HM_SMB = 0x00040000,
354 FW_CAPS_CONFIG_HM_MA = 0x00080000,
355 FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
356 FW_CAPS_CONFIG_HM_PMU = 0x00200000,
357 FW_CAPS_CONFIG_HM_UART = 0x00400000,
358 FW_CAPS_CONFIG_HM_SF = 0x00800000,
359};
360
361enum fw_caps_config_nbm {
362 FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
363 FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
364};
365
366enum fw_caps_config_link {
367 FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
368 FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
369 FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
370};
371
372enum fw_caps_config_switch {
373 FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
374 FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
375};
376
377enum fw_caps_config_nic {
378 FW_CAPS_CONFIG_NIC = 0x00000001,
379 FW_CAPS_CONFIG_NIC_VM = 0x00000002,
380};
381
382enum fw_caps_config_ofld {
383 FW_CAPS_CONFIG_OFLD = 0x00000001,
384};
385
386enum fw_caps_config_rdma {
387 FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
388 FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
389};
390
391enum fw_caps_config_iscsi {
392 FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
393 FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
394 FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
395 FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
396};
397
398enum fw_caps_config_fcoe {
399 FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
400 FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
401};
402
403struct fw_caps_config_cmd {
404 __be32 op_to_write;
405 __be32 retval_len16;
406 __be32 r2;
407 __be32 hwmbitmap;
408 __be16 nbmcaps;
409 __be16 linkcaps;
410 __be16 switchcaps;
411 __be16 r3;
412 __be16 niccaps;
413 __be16 ofldcaps;
414 __be16 rdmacaps;
415 __be16 r4;
416 __be16 iscsicaps;
417 __be16 fcoecaps;
418 __be32 r5;
419 __be64 r6;
420};
421
422/*
423 * params command mnemonics
424 */
425enum fw_params_mnem {
426 FW_PARAMS_MNEM_DEV = 1, /* device params */
427 FW_PARAMS_MNEM_PFVF = 2, /* function params */
428 FW_PARAMS_MNEM_REG = 3, /* limited register access */
429 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
430 FW_PARAMS_MNEM_LAST
431};
432
433/*
434 * device parameters
435 */
436enum fw_params_param_dev {
437 FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
438 FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
439 FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
440 * allocated by the device's
441 * Lookup Engine
442 */
443 FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
444 FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
445 FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
446 FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
447 FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
448 FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
449 FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
450 FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
451};
452
453/*
454 * physical and virtual function parameters
455 */
456enum fw_params_param_pfvf {
457 FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
458 FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
459 FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
460 FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
461 FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
462 FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
463 FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
464 FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
465 FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
466 FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
467 FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
468 FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
469 FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
470 FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
471 FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
472 FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
473 FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
474 FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
475 FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
476 FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
477 FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
478 FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
479};
480
481/*
482 * dma queue parameters
483 */
484enum fw_params_param_dmaq {
485 FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
486 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
487 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
488 FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
489 FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
490};
491
492#define FW_PARAMS_MNEM(x) ((x) << 24)
493#define FW_PARAMS_PARAM_X(x) ((x) << 16)
494#define FW_PARAMS_PARAM_Y(x) ((x) << 8)
495#define FW_PARAMS_PARAM_Z(x) ((x) << 0)
496#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
497#define FW_PARAMS_PARAM_YZ(x) ((x) << 0)
498
499struct fw_params_cmd {
500 __be32 op_to_vfn;
501 __be32 retval_len16;
502 struct fw_params_param {
503 __be32 mnem;
504 __be32 val;
505 } param[7];
506};
507
508#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
509#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
510
511struct fw_pfvf_cmd {
512 __be32 op_to_vfn;
513 __be32 retval_len16;
514 __be32 niqflint_niq;
515 __be32 cmask_to_neq;
516 __be32 tc_to_nexactf;
517 __be32 r_caps_to_nethctrl;
518 __be16 nricq;
519 __be16 nriqp;
520 __be32 r4;
521};
522
523#define FW_PFVF_CMD_PFN(x) ((x) << 8)
524#define FW_PFVF_CMD_VFN(x) ((x) << 0)
525
526#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
527#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
528
529#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
530#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
531
532#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
533#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
534
535#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
536#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
537
538#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
539#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
540
541#define FW_PFVF_CMD_TC(x) ((x) << 24)
542#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
543
544#define FW_PFVF_CMD_NVI(x) ((x) << 16)
545#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
546
547#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
548#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
549
550#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
551#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
552
553#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
554#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
555
556#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
557#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
558
559enum fw_iq_type {
560 FW_IQ_TYPE_FL_INT_CAP,
561 FW_IQ_TYPE_NO_FL_INT_CAP
562};
563
564struct fw_iq_cmd {
565 __be32 op_to_vfn;
566 __be32 alloc_to_len16;
567 __be16 physiqid;
568 __be16 iqid;
569 __be16 fl0id;
570 __be16 fl1id;
571 __be32 type_to_iqandstindex;
572 __be16 iqdroprss_to_iqesize;
573 __be16 iqsize;
574 __be64 iqaddr;
575 __be32 iqns_to_fl0congen;
576 __be16 fl0dcaen_to_fl0cidxfthresh;
577 __be16 fl0size;
578 __be64 fl0addr;
579 __be32 fl1cngchmap_to_fl1congen;
580 __be16 fl1dcaen_to_fl1cidxfthresh;
581 __be16 fl1size;
582 __be64 fl1addr;
583};
584
585#define FW_IQ_CMD_PFN(x) ((x) << 8)
586#define FW_IQ_CMD_VFN(x) ((x) << 0)
587
588#define FW_IQ_CMD_ALLOC (1U << 31)
589#define FW_IQ_CMD_FREE (1U << 30)
590#define FW_IQ_CMD_MODIFY (1U << 29)
591#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
592#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
593
594#define FW_IQ_CMD_TYPE(x) ((x) << 29)
595#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
596#define FW_IQ_CMD_VIID(x) ((x) << 16)
597#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
598#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
599#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
600#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
601
602#define FW_IQ_CMD_IQDROPRSS (1U << 15)
603#define FW_IQ_CMD_IQGTSMODE (1U << 14)
604#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
605#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
606#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
607#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
608#define FW_IQ_CMD_IQO (1U << 3)
609#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
610#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
611
612#define FW_IQ_CMD_IQNS(x) ((x) << 31)
613#define FW_IQ_CMD_IQRO(x) ((x) << 30)
614#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
615#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
616#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
617#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
618#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
619#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
620#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
621#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
622#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
623#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
624#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
625#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
626#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
627#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
628#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
629#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
630#define FW_IQ_CMD_FL0PADEN (1U << 2)
631#define FW_IQ_CMD_FL0PACKEN (1U << 1)
632#define FW_IQ_CMD_FL0CONGEN (1U << 0)
633
634#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
635#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
636#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
637#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
638#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
639#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
640
641#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
642#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
643#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
644#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
645#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
646#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
647#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
648#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
649#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
650#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
651#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
652#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
653#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
654#define FW_IQ_CMD_FL1PADEN (1U << 2)
655#define FW_IQ_CMD_FL1PACKEN (1U << 1)
656#define FW_IQ_CMD_FL1CONGEN (1U << 0)
657
658#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
659#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
660#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
661#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
662#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
663#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
664
665struct fw_eq_eth_cmd {
666 __be32 op_to_vfn;
667 __be32 alloc_to_len16;
668 __be32 eqid_pkd;
669 __be32 physeqid_pkd;
670 __be32 fetchszm_to_iqid;
671 __be32 dcaen_to_eqsize;
672 __be64 eqaddr;
673 __be32 viid_pkd;
674 __be32 r8_lo;
675 __be64 r9;
676};
677
678#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
679#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
680#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
681#define FW_EQ_ETH_CMD_FREE (1U << 30)
682#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
683#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
684#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
685
686#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
687#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
688#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
689
690#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
691#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
692#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
693#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
694#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
695#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
696#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
697#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
698#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
699#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
700
701#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
702#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
703#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
704#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
705#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
706#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
707#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
708
709#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
710
711struct fw_eq_ctrl_cmd {
712 __be32 op_to_vfn;
713 __be32 alloc_to_len16;
714 __be32 cmpliqid_eqid;
715 __be32 physeqid_pkd;
716 __be32 fetchszm_to_iqid;
717 __be32 dcaen_to_eqsize;
718 __be64 eqaddr;
719};
720
721#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
722#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
723
724#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
725#define FW_EQ_CTRL_CMD_FREE (1U << 30)
726#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
727#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
728#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
729
730#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
731#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
732#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
733#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
734
735#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
736#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
737#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
738#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
739#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
740#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
741#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
742#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
743#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
744#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
745
746#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
747#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
748#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
749#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
750#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
751#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
752#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
753
754struct fw_eq_ofld_cmd {
755 __be32 op_to_vfn;
756 __be32 alloc_to_len16;
757 __be32 eqid_pkd;
758 __be32 physeqid_pkd;
759 __be32 fetchszm_to_iqid;
760 __be32 dcaen_to_eqsize;
761 __be64 eqaddr;
762};
763
764#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
765#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
766
767#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
768#define FW_EQ_OFLD_CMD_FREE (1U << 30)
769#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
770#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
771#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
772
773#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
774#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
775#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
776
777#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
778#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
779#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
780#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
781#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
782#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
783#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
784#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
785#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
786#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
787
788#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
789#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
790#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
791#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
792#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
793#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
794#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
795
796/*
797 * Macros for VIID parsing:
798 * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
799 */
800#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
801#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
802#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
803
804struct fw_vi_cmd {
805 __be32 op_to_vfn;
806 __be32 alloc_to_len16;
807 __be16 viid_pkd;
808 u8 mac[6];
809 u8 portid_pkd;
810 u8 nmac;
811 u8 nmac0[6];
812 __be16 rsssize_pkd;
813 u8 nmac1[6];
814 __be16 r7;
815 u8 nmac2[6];
816 __be16 r8;
817 u8 nmac3[6];
818 __be64 r9;
819 __be64 r10;
820};
821
822#define FW_VI_CMD_PFN(x) ((x) << 8)
823#define FW_VI_CMD_VFN(x) ((x) << 0)
824#define FW_VI_CMD_ALLOC (1U << 31)
825#define FW_VI_CMD_FREE (1U << 30)
826#define FW_VI_CMD_VIID(x) ((x) << 0)
827#define FW_VI_CMD_PORTID(x) ((x) << 4)
828#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
829
/* Special VI_MAC command index ids */
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD

/* Where a VI MAC entry lives: MPS TCAM, SMT, or both. */
enum fw_vi_mac_smac {
	FW_VI_MAC_MPS_TCAM_ENTRY,
	FW_VI_MAC_MPS_TCAM_ONLY,
	FW_VI_MAC_SMT_ONLY,
	FW_VI_MAC_SMT_AND_MPSTCAM
};

/* Result codes reported for a VI MAC operation. */
enum fw_vi_mac_result {
	FW_VI_MAC_R_SUCCESS,
	FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
	FW_VI_MAC_R_SMAC_FAIL,
	FW_VI_MAC_R_F_ACL_CHECK
};
848
849struct fw_vi_mac_cmd {
850 __be32 op_to_viid;
851 __be32 freemacs_to_len16;
852 union fw_vi_mac {
853 struct fw_vi_mac_exact {
854 __be16 valid_to_idx;
855 u8 macaddr[6];
856 } exact[7];
857 struct fw_vi_mac_hash {
858 __be64 hashvec;
859 } hash;
860 } u;
861};
862
863#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
864#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
865#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
866#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
867#define FW_VI_MAC_CMD_VALID (1U << 15)
868#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
869#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
870#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
871#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
872#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
873
874#define FW_RXMODE_MTU_NO_CHG 65535
875
876struct fw_vi_rxmode_cmd {
877 __be32 op_to_viid;
878 __be32 retval_len16;
879 __be32 mtu_to_broadcasten;
880 __be32 r4_lo;
881};
882
883#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
884#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
885#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
886#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
887#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
888#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
889#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
890#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
891
892struct fw_vi_enable_cmd {
893 __be32 op_to_viid;
894 __be32 ien_to_len16;
895 __be16 blinkdur;
896 __be16 r3;
897 __be32 r4;
898};
899
900#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
901#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
902#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
903#define FW_VI_ENABLE_CMD_LED (1U << 29)
904
/* VI VF stats offset definitions */
#define VI_VF_NUM_STATS 16
/* Index of each VF statistic within the VI stats block (16 total). */
enum fw_vi_stats_vf_index {
	FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
	FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
	FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
	FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
	FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
	FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
	FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
	FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
	FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
	FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
	FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
	FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
	FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
	FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
	FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
	FW_VI_VF_STAT_RX_ERR_FRAMES_IX
};

/* VI PF stats offset definitions */
#define VI_PF_NUM_STATS 17
/* Index of each PF statistic within the VI stats block (17 total). */
enum fw_vi_stats_pf_index {
	FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
	FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
	FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
	FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
	FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
	FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
	FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
	FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
	FW_VI_PF_STAT_RX_BYTES_IX,
	FW_VI_PF_STAT_RX_FRAMES_IX,
	FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
	FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
	FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
	FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
	FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
	FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
	FW_VI_PF_STAT_RX_ERR_FRAMES_IX
};
947
948struct fw_vi_stats_cmd {
949 __be32 op_to_viid;
950 __be32 retval_len16;
951 union fw_vi_stats {
952 struct fw_vi_stats_ctl {
953 __be16 nstats_ix;
954 __be16 r6;
955 __be32 r7;
956 __be64 stat0;
957 __be64 stat1;
958 __be64 stat2;
959 __be64 stat3;
960 __be64 stat4;
961 __be64 stat5;
962 } ctl;
963 struct fw_vi_stats_pf {
964 __be64 tx_bcast_bytes;
965 __be64 tx_bcast_frames;
966 __be64 tx_mcast_bytes;
967 __be64 tx_mcast_frames;
968 __be64 tx_ucast_bytes;
969 __be64 tx_ucast_frames;
970 __be64 tx_offload_bytes;
971 __be64 tx_offload_frames;
972 __be64 rx_pf_bytes;
973 __be64 rx_pf_frames;
974 __be64 rx_bcast_bytes;
975 __be64 rx_bcast_frames;
976 __be64 rx_mcast_bytes;
977 __be64 rx_mcast_frames;
978 __be64 rx_ucast_bytes;
979 __be64 rx_ucast_frames;
980 __be64 rx_err_frames;
981 } pf;
982 struct fw_vi_stats_vf {
983 __be64 tx_bcast_bytes;
984 __be64 tx_bcast_frames;
985 __be64 tx_mcast_bytes;
986 __be64 tx_mcast_frames;
987 __be64 tx_ucast_bytes;
988 __be64 tx_ucast_frames;
989 __be64 tx_drop_frames;
990 __be64 tx_offload_bytes;
991 __be64 tx_offload_frames;
992 __be64 rx_bcast_bytes;
993 __be64 rx_bcast_frames;
994 __be64 rx_mcast_bytes;
995 __be64 rx_mcast_frames;
996 __be64 rx_ucast_bytes;
997 __be64 rx_ucast_frames;
998 __be64 rx_err_frames;
999 } vf;
1000 } u;
1001};
1002
1003#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
1004#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
1005#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
1006
1007struct fw_acl_mac_cmd {
1008 __be32 op_to_vfn;
1009 __be32 en_to_len16;
1010 u8 nmac;
1011 u8 r3[7];
1012 __be16 r4;
1013 u8 macaddr0[6];
1014 __be16 r5;
1015 u8 macaddr1[6];
1016 __be16 r6;
1017 u8 macaddr2[6];
1018 __be16 r7;
1019 u8 macaddr3[6];
1020};
1021
1022#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
1023#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
1024#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
1025
1026struct fw_acl_vlan_cmd {
1027 __be32 op_to_vfn;
1028 __be32 en_to_len16;
1029 u8 nvlan;
1030 u8 dropnovlan_fm;
1031 u8 r3_lo[6];
1032 __be16 vlanid[16];
1033};
1034
1035#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
1036#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
1037#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
1038#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
1039#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
1040
/* Port capability bit-mask values (speeds, flow control, AN, loopback). */
enum fw_port_cap {
	FW_PORT_CAP_SPEED_100M = 0x0001,
	FW_PORT_CAP_SPEED_1G = 0x0002,
	FW_PORT_CAP_SPEED_2_5G = 0x0004,
	FW_PORT_CAP_SPEED_10G = 0x0008,
	FW_PORT_CAP_SPEED_40G = 0x0010,
	FW_PORT_CAP_SPEED_100G = 0x0020,
	FW_PORT_CAP_FC_RX = 0x0040,
	FW_PORT_CAP_FC_TX = 0x0080,
	FW_PORT_CAP_ANEG = 0x0100,
	FW_PORT_CAP_MDI_0 = 0x0200,
	FW_PORT_CAP_MDI_1 = 0x0400,
	FW_PORT_CAP_BEAN = 0x0800,
	FW_PORT_CAP_PMA_LPBK = 0x1000,
	FW_PORT_CAP_PCS_LPBK = 0x2000,
	FW_PORT_CAP_PHYXS_LPBK = 0x4000,
	FW_PORT_CAP_FAR_END_LPBK = 0x8000,
};

/* MDI (crossover) configuration values, placed with FW_PORT_MDI(). */
enum fw_port_mdi {
	FW_PORT_MDI_UNCHANGED,
	FW_PORT_MDI_AUTO,
	FW_PORT_MDI_F_STRAIGHT,
	FW_PORT_MDI_F_CROSSOVER
};

#define FW_PORT_MDI(x) ((x) << 9)

/* Actions for the PORT command. */
enum fw_port_action {
	FW_PORT_ACTION_L1_CFG = 0x0001,
	FW_PORT_ACTION_L2_CFG = 0x0002,
	FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
	FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
	FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
	FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
	FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
	FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
	FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
	FW_PORT_ACTION_L1_LPBK = 0x0021,
	FW_PORT_ACTION_L1_PMA_LPBK = 0x0022,
	FW_PORT_ACTION_L1_PCS_LPBK = 0x0023,
	FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
	FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
	FW_PORT_ACTION_PHY_RESET = 0x0040,
	FW_PORT_ACTION_PMA_RESET = 0x0041,
	FW_PORT_ACTION_PCS_RESET = 0x0042,
	FW_PORT_ACTION_PHYXS_RESET = 0x0043,
	FW_PORT_ACTION_DTEXS_REEST = 0x0044,
	FW_PORT_ACTION_AN_RESET = 0x0045
};

/* L2 config control bit-field flags. */
enum fw_port_l2cfg_ctlbf {
	FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
	FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
	FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
	FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
	FW_PORT_L2_CTLBF_IVLAN = 0x10,
	FW_PORT_L2_CTLBF_TXIPG = 0x20
};

/* DCB sub-configuration selectors and their result codes. */
enum fw_port_dcb_cfg {
	FW_PORT_DCB_CFG_PG = 0x01,
	FW_PORT_DCB_CFG_PFC = 0x02,
	FW_PORT_DCB_CFG_APPL = 0x04
};

enum fw_port_dcb_cfg_rc {
	FW_PORT_DCB_CFG_SUCCESS = 0x0,
	FW_PORT_DCB_CFG_ERROR = 0x1
};
1111
1112struct fw_port_cmd {
1113 __be32 op_to_portid;
1114 __be32 action_to_len16;
1115 union fw_port {
1116 struct fw_port_l1cfg {
1117 __be32 rcap;
1118 __be32 r;
1119 } l1cfg;
1120 struct fw_port_l2cfg {
1121 __be16 ctlbf_to_ivlan0;
1122 __be16 ivlantype;
1123 __be32 txipg_pkd;
1124 __be16 ovlan0mask;
1125 __be16 ovlan0type;
1126 __be16 ovlan1mask;
1127 __be16 ovlan1type;
1128 __be16 ovlan2mask;
1129 __be16 ovlan2type;
1130 __be16 ovlan3mask;
1131 __be16 ovlan3type;
1132 } l2cfg;
1133 struct fw_port_info {
1134 __be32 lstatus_to_modtype;
1135 __be16 pcap;
1136 __be16 acap;
1137 } info;
1138 struct fw_port_ppp {
1139 __be32 pppen_to_ncsich;
1140 __be32 r11;
1141 } ppp;
1142 struct fw_port_dcb {
1143 __be16 cfg;
1144 u8 up_map;
1145 u8 sf_cfgrc;
1146 __be16 prot_ix;
1147 u8 pe7_to_pe0;
1148 u8 numTCPFCs;
1149 __be32 pgid0_to_pgid7;
1150 __be32 numTCs_oui;
1151 u8 pgpc[8];
1152 } dcb;
1153 } u;
1154};
1155
1156#define FW_PORT_CMD_READ (1U << 22)
1157
1158#define FW_PORT_CMD_PORTID(x) ((x) << 0)
1159#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
1160
1161#define FW_PORT_CMD_ACTION(x) ((x) << 16)
1162
1163#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
1164#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
1165#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
1166#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
1167#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
1168#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
1169
1170#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
1171
1172#define FW_PORT_CMD_LSTATUS (1U << 31)
1173#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
1174#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
1175#define FW_PORT_CMD_TXPAUSE (1U << 23)
1176#define FW_PORT_CMD_RXPAUSE (1U << 22)
1177#define FW_PORT_CMD_MDIOCAP (1U << 21)
1178#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
1179#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
1180#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
1181#define FW_PORT_CMD_PTYPE_MASK 0x1f
1182#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
1183#define FW_PORT_CMD_MODTYPE_MASK 0x1f
1184#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
1185
1186#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
1187#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
1188#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
1189
1190#define FW_PORT_CMD_CH0(x) ((x) << 20)
1191#define FW_PORT_CMD_CH1(x) ((x) << 16)
1192#define FW_PORT_CMD_CH2(x) ((x) << 12)
1193#define FW_PORT_CMD_CH3(x) ((x) << 8)
1194#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
1195
1196enum fw_port_type {
1197 FW_PORT_TYPE_FIBER,
1198 FW_PORT_TYPE_KX4,
1199 FW_PORT_TYPE_BT_SGMII,
1200 FW_PORT_TYPE_KX,
1201 FW_PORT_TYPE_BT_XAUI,
1202 FW_PORT_TYPE_KR,
1203 FW_PORT_TYPE_CX4,
1204 FW_PORT_TYPE_TWINAX,
1205
1206 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1207};
1208
1209enum fw_port_module_type {
1210 FW_PORT_MOD_TYPE_NA,
1211 FW_PORT_MOD_TYPE_LR,
1212 FW_PORT_MOD_TYPE_SR,
1213 FW_PORT_MOD_TYPE_ER,
1214
1215 FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
1216};
1217
/* port stats */
#define FW_NUM_PORT_STATS 50
#define FW_NUM_PORT_TX_STATS 23
#define FW_NUM_PORT_RX_STATS 27

/* Index of each TX port statistic (23 total). */
enum fw_port_stats_tx_index {
	FW_STAT_TX_PORT_BYTES_IX,
	FW_STAT_TX_PORT_FRAMES_IX,
	FW_STAT_TX_PORT_BCAST_IX,
	FW_STAT_TX_PORT_MCAST_IX,
	FW_STAT_TX_PORT_UCAST_IX,
	FW_STAT_TX_PORT_ERROR_IX,
	FW_STAT_TX_PORT_64B_IX,
	FW_STAT_TX_PORT_65B_127B_IX,
	FW_STAT_TX_PORT_128B_255B_IX,
	FW_STAT_TX_PORT_256B_511B_IX,
	FW_STAT_TX_PORT_512B_1023B_IX,
	FW_STAT_TX_PORT_1024B_1518B_IX,
	FW_STAT_TX_PORT_1519B_MAX_IX,
	FW_STAT_TX_PORT_DROP_IX,
	FW_STAT_TX_PORT_PAUSE_IX,
	FW_STAT_TX_PORT_PPP0_IX,
	FW_STAT_TX_PORT_PPP1_IX,
	FW_STAT_TX_PORT_PPP2_IX,
	FW_STAT_TX_PORT_PPP3_IX,
	FW_STAT_TX_PORT_PPP4_IX,
	FW_STAT_TX_PORT_PPP5_IX,
	FW_STAT_TX_PORT_PPP6_IX,
	FW_STAT_TX_PORT_PPP7_IX
};

/* Index of each RX port statistic (27 total). */
enum fw_port_stat_rx_index {
	FW_STAT_RX_PORT_BYTES_IX,
	FW_STAT_RX_PORT_FRAMES_IX,
	FW_STAT_RX_PORT_BCAST_IX,
	FW_STAT_RX_PORT_MCAST_IX,
	FW_STAT_RX_PORT_UCAST_IX,
	FW_STAT_RX_PORT_MTU_ERROR_IX,
	FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
	FW_STAT_RX_PORT_CRC_ERROR_IX,
	FW_STAT_RX_PORT_LEN_ERROR_IX,
	FW_STAT_RX_PORT_SYM_ERROR_IX,
	FW_STAT_RX_PORT_64B_IX,
	FW_STAT_RX_PORT_65B_127B_IX,
	FW_STAT_RX_PORT_128B_255B_IX,
	FW_STAT_RX_PORT_256B_511B_IX,
	FW_STAT_RX_PORT_512B_1023B_IX,
	FW_STAT_RX_PORT_1024B_1518B_IX,
	FW_STAT_RX_PORT_1519B_MAX_IX,
	FW_STAT_RX_PORT_PAUSE_IX,
	FW_STAT_RX_PORT_PPP0_IX,
	FW_STAT_RX_PORT_PPP1_IX,
	FW_STAT_RX_PORT_PPP2_IX,
	FW_STAT_RX_PORT_PPP3_IX,
	FW_STAT_RX_PORT_PPP4_IX,
	FW_STAT_RX_PORT_PPP5_IX,
	FW_STAT_RX_PORT_PPP6_IX,
	FW_STAT_RX_PORT_PPP7_IX,
	FW_STAT_RX_PORT_LESS_64B_IX
};
1278
1279struct fw_port_stats_cmd {
1280 __be32 op_to_portid;
1281 __be32 retval_len16;
1282 union fw_port_stats {
1283 struct fw_port_stats_ctl {
1284 u8 nstats_bg_bm;
1285 u8 tx_ix;
1286 __be16 r6;
1287 __be32 r7;
1288 __be64 stat0;
1289 __be64 stat1;
1290 __be64 stat2;
1291 __be64 stat3;
1292 __be64 stat4;
1293 __be64 stat5;
1294 } ctl;
1295 struct fw_port_stats_all {
1296 __be64 tx_bytes;
1297 __be64 tx_frames;
1298 __be64 tx_bcast;
1299 __be64 tx_mcast;
1300 __be64 tx_ucast;
1301 __be64 tx_error;
1302 __be64 tx_64b;
1303 __be64 tx_65b_127b;
1304 __be64 tx_128b_255b;
1305 __be64 tx_256b_511b;
1306 __be64 tx_512b_1023b;
1307 __be64 tx_1024b_1518b;
1308 __be64 tx_1519b_max;
1309 __be64 tx_drop;
1310 __be64 tx_pause;
1311 __be64 tx_ppp0;
1312 __be64 tx_ppp1;
1313 __be64 tx_ppp2;
1314 __be64 tx_ppp3;
1315 __be64 tx_ppp4;
1316 __be64 tx_ppp5;
1317 __be64 tx_ppp6;
1318 __be64 tx_ppp7;
1319 __be64 rx_bytes;
1320 __be64 rx_frames;
1321 __be64 rx_bcast;
1322 __be64 rx_mcast;
1323 __be64 rx_ucast;
1324 __be64 rx_mtu_error;
1325 __be64 rx_mtu_crc_error;
1326 __be64 rx_crc_error;
1327 __be64 rx_len_error;
1328 __be64 rx_sym_error;
1329 __be64 rx_64b;
1330 __be64 rx_65b_127b;
1331 __be64 rx_128b_255b;
1332 __be64 rx_256b_511b;
1333 __be64 rx_512b_1023b;
1334 __be64 rx_1024b_1518b;
1335 __be64 rx_1519b_max;
1336 __be64 rx_pause;
1337 __be64 rx_ppp0;
1338 __be64 rx_ppp1;
1339 __be64 rx_ppp2;
1340 __be64 rx_ppp3;
1341 __be64 rx_ppp4;
1342 __be64 rx_ppp5;
1343 __be64 rx_ppp6;
1344 __be64 rx_ppp7;
1345 __be64 rx_less_64b;
1346 __be64 rx_bg_drop;
1347 __be64 rx_bg_trunc;
1348 } all;
1349 } u;
1350};
1351
1352#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
1353#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
1354#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
1355#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
1356
1357/* port loopback stats */
1358#define FW_NUM_LB_STATS 16
1359enum fw_port_lb_stats_index {
1360 FW_STAT_LB_PORT_BYTES_IX,
1361 FW_STAT_LB_PORT_FRAMES_IX,
1362 FW_STAT_LB_PORT_BCAST_IX,
1363 FW_STAT_LB_PORT_MCAST_IX,
1364 FW_STAT_LB_PORT_UCAST_IX,
1365 FW_STAT_LB_PORT_ERROR_IX,
1366 FW_STAT_LB_PORT_64B_IX,
1367 FW_STAT_LB_PORT_65B_127B_IX,
1368 FW_STAT_LB_PORT_128B_255B_IX,
1369 FW_STAT_LB_PORT_256B_511B_IX,
1370 FW_STAT_LB_PORT_512B_1023B_IX,
1371 FW_STAT_LB_PORT_1024B_1518B_IX,
1372 FW_STAT_LB_PORT_1519B_MAX_IX,
1373 FW_STAT_LB_PORT_DROP_FRAMES_IX
1374};
1375
1376struct fw_port_lb_stats_cmd {
1377 __be32 op_to_lbport;
1378 __be32 retval_len16;
1379 union fw_port_lb_stats {
1380 struct fw_port_lb_stats_ctl {
1381 u8 nstats_bg_bm;
1382 u8 ix_pkd;
1383 __be16 r6;
1384 __be32 r7;
1385 __be64 stat0;
1386 __be64 stat1;
1387 __be64 stat2;
1388 __be64 stat3;
1389 __be64 stat4;
1390 __be64 stat5;
1391 } ctl;
1392 struct fw_port_lb_stats_all {
1393 __be64 tx_bytes;
1394 __be64 tx_frames;
1395 __be64 tx_bcast;
1396 __be64 tx_mcast;
1397 __be64 tx_ucast;
1398 __be64 tx_error;
1399 __be64 tx_64b;
1400 __be64 tx_65b_127b;
1401 __be64 tx_128b_255b;
1402 __be64 tx_256b_511b;
1403 __be64 tx_512b_1023b;
1404 __be64 tx_1024b_1518b;
1405 __be64 tx_1519b_max;
1406 __be64 rx_lb_drop;
1407 __be64 rx_lb_trunc;
1408 } all;
1409 } u;
1410};
1411
1412#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
1413#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
1414#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
1415#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
1416
1417struct fw_rss_ind_tbl_cmd {
1418 __be32 op_to_viid;
1419#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
1420 __be32 retval_len16;
1421 __be16 niqid;
1422 __be16 startidx;
1423 __be32 r3;
1424 __be32 iq0_to_iq2;
1425#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
1426#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
1427#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
1428 __be32 iq3_to_iq5;
1429 __be32 iq6_to_iq8;
1430 __be32 iq9_to_iq11;
1431 __be32 iq12_to_iq14;
1432 __be32 iq15_to_iq17;
1433 __be32 iq18_to_iq20;
1434 __be32 iq21_to_iq23;
1435 __be32 iq24_to_iq26;
1436 __be32 iq27_to_iq29;
1437 __be32 iq30_iq31;
1438 __be32 r15_lo;
1439};
1440
1441struct fw_rss_glb_config_cmd {
1442 __be32 op_to_write;
1443 __be32 retval_len16;
1444 union fw_rss_glb_config {
1445 struct fw_rss_glb_config_manual {
1446 __be32 mode_pkd;
1447 __be32 r3;
1448 __be64 r4;
1449 __be64 r5;
1450 } manual;
1451 struct fw_rss_glb_config_basicvirtual {
1452 __be32 mode_pkd;
1453 __be32 synmapen_to_hashtoeplitz;
1454#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8)
1455#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
1456#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
1457#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
1458#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
1459#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3)
1460#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2)
1461#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1)
1462#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0)
1463 __be64 r8;
1464 __be64 r9;
1465 } basicvirtual;
1466 } u;
1467};
1468
1469#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28)
1470
1471#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
1472#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
1473
1474struct fw_rss_vi_config_cmd {
1475 __be32 op_to_viid;
1476#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
1477 __be32 retval_len16;
1478 union fw_rss_vi_config {
1479 struct fw_rss_vi_config_manual {
1480 __be64 r3;
1481 __be64 r4;
1482 __be64 r5;
1483 } manual;
1484 struct fw_rss_vi_config_basicvirtual {
1485 __be32 r6;
1486 __be32 defaultq_to_ip4udpen;
1487#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16)
1488#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
1489#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3)
1490#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
1491#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1)
1492#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0)
1493 __be64 r9;
1494 __be64 r10;
1495 } basicvirtual;
1496 } u;
1497};
1498
/* Firmware error-report categories (selects fw_error_cmd union member). */
enum fw_error_type {
	FW_ERROR_TYPE_EXCEPTION = 0x0,
	FW_ERROR_TYPE_HWMODULE = 0x1,
	FW_ERROR_TYPE_WR = 0x2,
	FW_ERROR_TYPE_ACL = 0x3,
};
1505
1506struct fw_error_cmd {
1507 __be32 op_to_type;
1508 __be32 len16_pkd;
1509 union fw_error {
1510 struct fw_error_exception {
1511 __be32 info[6];
1512 } exception;
1513 struct fw_error_hwmodule {
1514 __be32 regaddr;
1515 __be32 regval;
1516 } hwmodule;
1517 struct fw_error_wr {
1518 __be16 cidx;
1519 __be16 pfn_vfn;
1520 __be32 eqid;
1521 u8 wrhdr[16];
1522 } wr;
1523 struct fw_error_acl {
1524 __be16 cidx;
1525 __be16 pfn_vfn;
1526 __be32 eqid;
1527 __be16 mv_pkd;
1528 u8 val[6];
1529 __be64 r4;
1530 } acl;
1531 } u;
1532};
1533
1534struct fw_debug_cmd {
1535 __be32 op_type;
1536#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
1537 __be32 len16_pkd;
1538 union fw_debug {
1539 struct fw_debug_assert {
1540 __be32 fcid;
1541 __be32 line;
1542 __be32 x;
1543 __be32 y;
1544 u8 filename_0_7[8];
1545 u8 filename_8_15[8];
1546 __be64 r3;
1547 } assert;
1548 struct fw_debug_prt {
1549 __be16 dprtstridx;
1550 __be16 r3[3];
1551 __be32 dprtstrparam0;
1552 __be32 dprtstrparam1;
1553 __be32 dprtstrparam2;
1554 __be32 dprtstrparam3;
1555 } prt;
1556 } u;
1557};
1558
1559struct fw_hdr {
1560 u8 ver;
1561 u8 reserved1;
1562 __be16 len512; /* bin length in units of 512-bytes */
1563 __be32 fw_ver; /* firmware version */
1564 __be32 tp_microcode_ver;
1565 u8 intfver_nic;
1566 u8 intfver_vnic;
1567 u8 intfver_ofld;
1568 u8 intfver_ri;
1569 u8 intfver_iscsipdu;
1570 u8 intfver_iscsi;
1571 u8 intfver_fcoe;
1572 u8 reserved2;
1573 __be32 reserved3[27];
1574};
1575
1576#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
1577#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
1578#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
1579#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
1580#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33b7160..2f29c2131851 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@ struct e1000_adapter {
261 /* TX */ 261 /* TX */
262 struct e1000_tx_ring *tx_ring; /* One per active queue */ 262 struct e1000_tx_ring *tx_ring; /* One per active queue */
263 unsigned int restart_queue; 263 unsigned int restart_queue;
264 unsigned long tx_queue_len;
265 u32 txd_cmd; 264 u32 txd_cmd;
266 u32 tx_int_delay; 265 u32 tx_int_delay;
267 u32 tx_abs_int_delay; 266 u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 41330349b07a..47da5fc1e9f4 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
383 adapter->alloc_rx_buf(adapter, ring, 383 adapter->alloc_rx_buf(adapter, ring,
384 E1000_DESC_UNUSED(ring)); 384 E1000_DESC_UNUSED(ring));
385 } 385 }
386
387 adapter->tx_queue_len = netdev->tx_queue_len;
388} 386}
389 387
390int e1000_up(struct e1000_adapter *adapter) 388int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
503 del_timer_sync(&adapter->watchdog_timer); 501 del_timer_sync(&adapter->watchdog_timer);
504 del_timer_sync(&adapter->phy_info_timer); 502 del_timer_sync(&adapter->phy_info_timer);
505 503
506 netdev->tx_queue_len = adapter->tx_queue_len;
507 adapter->link_speed = 0; 504 adapter->link_speed = 0;
508 adapter->link_duplex = 0; 505 adapter->link_duplex = 0;
509 netif_carrier_off(netdev); 506 netif_carrier_off(netdev);
@@ -2315,19 +2312,15 @@ static void e1000_watchdog(unsigned long data)
2315 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2312 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2316 E1000_CTRL_TFCE) ? "TX" : "None" ))); 2313 E1000_CTRL_TFCE) ? "TX" : "None" )));
2317 2314
2318 /* tweak tx_queue_len according to speed/duplex 2315 /* adjust timeout factor according to speed/duplex */
2319 * and adjust the timeout factor */
2320 netdev->tx_queue_len = adapter->tx_queue_len;
2321 adapter->tx_timeout_factor = 1; 2316 adapter->tx_timeout_factor = 1;
2322 switch (adapter->link_speed) { 2317 switch (adapter->link_speed) {
2323 case SPEED_10: 2318 case SPEED_10:
2324 txb2b = false; 2319 txb2b = false;
2325 netdev->tx_queue_len = 10;
2326 adapter->tx_timeout_factor = 16; 2320 adapter->tx_timeout_factor = 16;
2327 break; 2321 break;
2328 case SPEED_100: 2322 case SPEED_100:
2329 txb2b = false; 2323 txb2b = false;
2330 netdev->tx_queue_len = 100;
2331 /* maybe add some timeout factor ? */ 2324 /* maybe add some timeout factor ? */
2332 break; 2325 break;
2333 } 2326 }
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 11e02e1f187c..12648a1cdb78 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -273,7 +273,6 @@ struct e1000_adapter {
273 273
274 struct napi_struct napi; 274 struct napi_struct napi;
275 275
276 unsigned long tx_queue_len;
277 unsigned int restart_queue; 276 unsigned int restart_queue;
278 u32 txd_cmd; 277 u32 txd_cmd;
279 278
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 02f7d20f3c80..167b1aedfb42 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2292,8 +2292,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2292 ew32(TCTL, tctl); 2292 ew32(TCTL, tctl);
2293 2293
2294 e1000e_config_collision_dist(hw); 2294 e1000e_config_collision_dist(hw);
2295
2296 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
2297} 2295}
2298 2296
2299/** 2297/**
@@ -2879,7 +2877,6 @@ void e1000e_down(struct e1000_adapter *adapter)
2879 del_timer_sync(&adapter->watchdog_timer); 2877 del_timer_sync(&adapter->watchdog_timer);
2880 del_timer_sync(&adapter->phy_info_timer); 2878 del_timer_sync(&adapter->phy_info_timer);
2881 2879
2882 netdev->tx_queue_len = adapter->tx_queue_len;
2883 netif_carrier_off(netdev); 2880 netif_carrier_off(netdev);
2884 adapter->link_speed = 0; 2881 adapter->link_speed = 0;
2885 adapter->link_duplex = 0; 2882 adapter->link_duplex = 0;
@@ -3612,21 +3609,15 @@ static void e1000_watchdog_task(struct work_struct *work)
3612 "link gets many collisions.\n"); 3609 "link gets many collisions.\n");
3613 } 3610 }
3614 3611
3615 /* 3612 /* adjust timeout factor according to speed/duplex */
3616 * tweak tx_queue_len according to speed/duplex
3617 * and adjust the timeout factor
3618 */
3619 netdev->tx_queue_len = adapter->tx_queue_len;
3620 adapter->tx_timeout_factor = 1; 3613 adapter->tx_timeout_factor = 1;
3621 switch (adapter->link_speed) { 3614 switch (adapter->link_speed) {
3622 case SPEED_10: 3615 case SPEED_10:
3623 txb2b = 0; 3616 txb2b = 0;
3624 netdev->tx_queue_len = 10;
3625 adapter->tx_timeout_factor = 16; 3617 adapter->tx_timeout_factor = 16;
3626 break; 3618 break;
3627 case SPEED_100: 3619 case SPEED_100:
3628 txb2b = 0; 3620 txb2b = 0;
3629 netdev->tx_queue_len = 100;
3630 adapter->tx_timeout_factor = 10; 3621 adapter->tx_timeout_factor = 10;
3631 break; 3622 break;
3632 } 3623 }
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index fdd26c2b1a2f..5175233f11f2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -676,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
676 priv->rx_queue[i] = NULL; 676 priv->rx_queue[i] = NULL;
677 677
678 for (i = 0; i < priv->num_tx_queues; i++) { 678 for (i = 0; i < priv->num_tx_queues; i++) {
679 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( 679 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc(
680 sizeof (struct gfar_priv_tx_q), GFP_KERNEL); 680 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
681 if (!priv->tx_queue[i]) { 681 if (!priv->tx_queue[i]) {
682 err = -ENOMEM; 682 err = -ENOMEM;
@@ -689,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
689 } 689 }
690 690
691 for (i = 0; i < priv->num_rx_queues; i++) { 691 for (i = 0; i < priv->num_rx_queues; i++) {
692 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( 692 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
693 sizeof (struct gfar_priv_rx_q), GFP_KERNEL); 693 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
694 if (!priv->rx_queue[i]) { 694 if (!priv->rx_queue[i]) {
695 err = -ENOMEM; 695 err = -ENOMEM;
@@ -1120,10 +1120,10 @@ static int gfar_probe(struct of_device *ofdev,
1120 /* provided which set of benchmarks. */ 1120 /* provided which set of benchmarks. */
1121 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1121 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
1122 for (i = 0; i < priv->num_rx_queues; i++) 1122 for (i = 0; i < priv->num_rx_queues; i++)
1123 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", 1123 printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
1124 dev->name, i, priv->rx_queue[i]->rx_ring_size); 1124 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1125 for(i = 0; i < priv->num_tx_queues; i++) 1125 for(i = 0; i < priv->num_tx_queues; i++)
1126 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", 1126 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
1127 dev->name, i, priv->tx_queue[i]->tx_ring_size); 1127 dev->name, i, priv->tx_queue[i]->tx_ring_size);
1128 1128
1129 return 0; 1129 return 0;
@@ -1638,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv)
1638 /* Go through all the buffer descriptors and free their data buffers */ 1638 /* Go through all the buffer descriptors and free their data buffers */
1639 for (i = 0; i < priv->num_tx_queues; i++) { 1639 for (i = 0; i < priv->num_tx_queues; i++) {
1640 tx_queue = priv->tx_queue[i]; 1640 tx_queue = priv->tx_queue[i];
1641 if(!tx_queue->tx_skbuff) 1641 if(tx_queue->tx_skbuff)
1642 free_skb_tx_queue(tx_queue); 1642 free_skb_tx_queue(tx_queue);
1643 } 1643 }
1644 1644
1645 for (i = 0; i < priv->num_rx_queues; i++) { 1645 for (i = 0; i < priv->num_rx_queues; i++) {
1646 rx_queue = priv->rx_queue[i]; 1646 rx_queue = priv->rx_queue[i];
1647 if(!rx_queue->rx_skbuff) 1647 if(rx_queue->rx_skbuff)
1648 free_skb_rx_queue(rx_queue); 1648 free_skb_rx_queue(rx_queue);
1649 } 1649 }
1650 1650
@@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
2393 * as many bytes as needed to align the data properly 2393 * as many bytes as needed to align the data properly
2394 */ 2394 */
2395 skb_reserve(skb, alignamount); 2395 skb_reserve(skb, alignamount);
2396 GFAR_CB(skb)->alignamount = alignamount;
2396 2397
2397 return skb; 2398 return skb;
2398} 2399}
@@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2533 newskb = skb; 2534 newskb = skb;
2534 else if (skb) { 2535 else if (skb) {
2535 /* 2536 /*
2536 * We need to reset ->data to what it 2537 * We need to un-reserve() the skb to what it
2537 * was before gfar_new_skb() re-aligned 2538 * was before gfar_new_skb() re-aligned
2538 * it to an RXBUF_ALIGNMENT boundary 2539 * it to an RXBUF_ALIGNMENT boundary
2539 * before we put the skb back on the 2540 * before we put the skb back on the
2540 * recycle list. 2541 * recycle list.
2541 */ 2542 */
2542 skb->data = skb->head + NET_SKB_PAD; 2543 skb_reserve(skb, -GFAR_CB(skb)->alignamount);
2543 __skb_queue_head(&priv->rx_recycle, skb); 2544 __skb_queue_head(&priv->rx_recycle, skb);
2544 } 2545 }
2545 } else { 2546 } else {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc43dca5..17d25e714236 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@ struct rxfcb {
566 u16 vlctl; /* VLAN control word */ 566 u16 vlctl; /* VLAN control word */
567}; 567};
568 568
569struct gianfar_skb_cb {
570 int alignamount;
571};
572
573#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
574
569struct rmon_mib 575struct rmon_mib
570{ 576{
571 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ 577 u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886b37eb..be8d010e4021 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@ out:
1367 * igb_enable_mng_pass_thru - Enable processing of ARP's 1367 * igb_enable_mng_pass_thru - Enable processing of ARP's
1368 * @hw: pointer to the HW structure 1368 * @hw: pointer to the HW structure
1369 * 1369 *
1370 * Verifies the hardware needs to allow ARPs to be processed by the host. 1370 * Verifies the hardware needs to leave interface enabled so that frames can
1371 * be directed to and from the management interface.
1371 **/ 1372 **/
1372bool igb_enable_mng_pass_thru(struct e1000_hw *hw) 1373bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1373{ 1374{
@@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1380 1381
1381 manc = rd32(E1000_MANC); 1382 manc = rd32(E1000_MANC);
1382 1383
1383 if (!(manc & E1000_MANC_RCV_TCO_EN) || 1384 if (!(manc & E1000_MANC_RCV_TCO_EN))
1384 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
1385 goto out; 1385 goto out;
1386 1386
1387 if (hw->mac.arc_subsystem_valid) { 1387 if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4f69b6d951b3..7d288ccca1ca 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -268,7 +268,6 @@ struct igb_adapter {
268 268
269 /* TX */ 269 /* TX */
270 struct igb_ring *tx_ring[16]; 270 struct igb_ring *tx_ring[16];
271 unsigned long tx_queue_len;
272 u32 tx_timeout_count; 271 u32 tx_timeout_count;
273 272
274 /* RX */ 273 /* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78cc742e233f..2745e17fd021 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1086,9 +1086,6 @@ static void igb_configure(struct igb_adapter *adapter)
1086 struct igb_ring *ring = adapter->rx_ring[i]; 1086 struct igb_ring *ring = adapter->rx_ring[i];
1087 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); 1087 igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
1088 } 1088 }
1089
1090
1091 adapter->tx_queue_len = netdev->tx_queue_len;
1092} 1089}
1093 1090
1094/** 1091/**
@@ -1194,7 +1191,6 @@ void igb_down(struct igb_adapter *adapter)
1194 del_timer_sync(&adapter->watchdog_timer); 1191 del_timer_sync(&adapter->watchdog_timer);
1195 del_timer_sync(&adapter->phy_info_timer); 1192 del_timer_sync(&adapter->phy_info_timer);
1196 1193
1197 netdev->tx_queue_len = adapter->tx_queue_len;
1198 netif_carrier_off(netdev); 1194 netif_carrier_off(netdev);
1199 1195
1200 /* record the stats before reset*/ 1196 /* record the stats before reset*/
@@ -3092,17 +3088,13 @@ static void igb_watchdog_task(struct work_struct *work)
3092 ((ctrl & E1000_CTRL_RFCE) ? "RX" : 3088 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3093 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); 3089 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
3094 3090
3095 /* tweak tx_queue_len according to speed/duplex and 3091 /* adjust timeout factor according to speed/duplex */
3096 * adjust the timeout factor */
3097 netdev->tx_queue_len = adapter->tx_queue_len;
3098 adapter->tx_timeout_factor = 1; 3092 adapter->tx_timeout_factor = 1;
3099 switch (adapter->link_speed) { 3093 switch (adapter->link_speed) {
3100 case SPEED_10: 3094 case SPEED_10:
3101 netdev->tx_queue_len = 10;
3102 adapter->tx_timeout_factor = 14; 3095 adapter->tx_timeout_factor = 14;
3103 break; 3096 break;
3104 case SPEED_100: 3097 case SPEED_100:
3105 netdev->tx_queue_len = 100;
3106 /* maybe add some timeout factor ? */ 3098 /* maybe add some timeout factor ? */
3107 break; 3099 break;
3108 } 3100 }
@@ -3960,7 +3952,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3960 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); 3952 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3961 struct e1000_hw *hw = &adapter->hw; 3953 struct e1000_hw *hw = &adapter->hw;
3962 struct pci_dev *pdev = adapter->pdev; 3954 struct pci_dev *pdev = adapter->pdev;
3963 u32 rnbc, reg; 3955 u32 reg, mpc;
3964 u16 phy_tmp; 3956 u16 phy_tmp;
3965 int i; 3957 int i;
3966 u64 bytes, packets; 3958 u64 bytes, packets;
@@ -4018,7 +4010,9 @@ void igb_update_stats(struct igb_adapter *adapter)
4018 adapter->stats.symerrs += rd32(E1000_SYMERRS); 4010 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4019 adapter->stats.sec += rd32(E1000_SEC); 4011 adapter->stats.sec += rd32(E1000_SEC);
4020 4012
4021 adapter->stats.mpc += rd32(E1000_MPC); 4013 mpc = rd32(E1000_MPC);
4014 adapter->stats.mpc += mpc;
4015 net_stats->rx_fifo_errors += mpc;
4022 adapter->stats.scc += rd32(E1000_SCC); 4016 adapter->stats.scc += rd32(E1000_SCC);
4023 adapter->stats.ecol += rd32(E1000_ECOL); 4017 adapter->stats.ecol += rd32(E1000_ECOL);
4024 adapter->stats.mcc += rd32(E1000_MCC); 4018 adapter->stats.mcc += rd32(E1000_MCC);
@@ -4033,9 +4027,7 @@ void igb_update_stats(struct igb_adapter *adapter)
4033 adapter->stats.gptc += rd32(E1000_GPTC); 4027 adapter->stats.gptc += rd32(E1000_GPTC);
4034 adapter->stats.gotc += rd32(E1000_GOTCL); 4028 adapter->stats.gotc += rd32(E1000_GOTCL);
4035 rd32(E1000_GOTCH); /* clear GOTCL */ 4029 rd32(E1000_GOTCH); /* clear GOTCL */
4036 rnbc = rd32(E1000_RNBC); 4030 adapter->stats.rnbc += rd32(E1000_RNBC);
4037 adapter->stats.rnbc += rnbc;
4038 net_stats->rx_fifo_errors += rnbc;
4039 adapter->stats.ruc += rd32(E1000_RUC); 4031 adapter->stats.ruc += rd32(E1000_RUC);
4040 adapter->stats.rfc += rd32(E1000_RFC); 4032 adapter->stats.rfc += rd32(E1000_RFC);
4041 adapter->stats.rjc += rd32(E1000_RJC); 4033 adapter->stats.rjc += rd32(E1000_RJC);
@@ -5107,7 +5099,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
5107{ 5099{
5108 struct igb_adapter *adapter = q_vector->adapter; 5100 struct igb_adapter *adapter = q_vector->adapter;
5109 5101
5110 if (vlan_tag) 5102 if (vlan_tag && adapter->vlgrp)
5111 vlan_gro_receive(&q_vector->napi, adapter->vlgrp, 5103 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
5112 vlan_tag, skb); 5104 vlan_tag, skb);
5113 else 5105 else
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b29d222..debeee2dc717 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@ struct igbvf_adapter {
198 struct igbvf_ring *tx_ring /* One per active queue */ 198 struct igbvf_ring *tx_ring /* One per active queue */
199 ____cacheline_aligned_in_smp; 199 ____cacheline_aligned_in_smp;
200 200
201 unsigned long tx_queue_len;
202 unsigned int restart_queue; 201 unsigned int restart_queue;
203 u32 txd_cmd; 202 u32 txd_cmd;
204 203
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ea8abf5c1ef2..868855078ebc 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1304 1304
1305 /* enable Report Status bit */ 1305 /* enable Report Status bit */
1306 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; 1306 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1307
1308 adapter->tx_queue_len = adapter->netdev->tx_queue_len;
1309} 1307}
1310 1308
1311/** 1309/**
@@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
1524 1522
1525 del_timer_sync(&adapter->watchdog_timer); 1523 del_timer_sync(&adapter->watchdog_timer);
1526 1524
1527 netdev->tx_queue_len = adapter->tx_queue_len;
1528 netif_carrier_off(netdev); 1525 netif_carrier_off(netdev);
1529 1526
1530 /* record the stats before reset*/ 1527 /* record the stats before reset*/
@@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
1857 &adapter->link_duplex); 1854 &adapter->link_duplex);
1858 igbvf_print_link_info(adapter); 1855 igbvf_print_link_info(adapter);
1859 1856
1860 /* 1857 /* adjust timeout factor according to speed/duplex */
1861 * tweak tx_queue_len according to speed/duplex
1862 * and adjust the timeout factor
1863 */
1864 netdev->tx_queue_len = adapter->tx_queue_len;
1865 adapter->tx_timeout_factor = 1; 1858 adapter->tx_timeout_factor = 1;
1866 switch (adapter->link_speed) { 1859 switch (adapter->link_speed) {
1867 case SPEED_10: 1860 case SPEED_10:
1868 txb2b = 0; 1861 txb2b = 0;
1869 netdev->tx_queue_len = 10;
1870 adapter->tx_timeout_factor = 16; 1862 adapter->tx_timeout_factor = 16;
1871 break; 1863 break;
1872 case SPEED_100: 1864 case SPEED_100:
1873 txb2b = 0; 1865 txb2b = 0;
1874 netdev->tx_queue_len = 100;
1875 /* maybe add some timeout factor ? */ 1866 /* maybe add some timeout factor ? */
1876 break; 1867 break;
1877 } 1868 }
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee155a2..79c35ae3718c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum {
204#define IXGBE_MAX_FDIR_INDICES 64 204#define IXGBE_MAX_FDIR_INDICES 64
205#ifdef IXGBE_FCOE 205#ifdef IXGBE_FCOE
206#define IXGBE_MAX_FCOE_INDICES 8 206#define IXGBE_MAX_FCOE_INDICES 8
207#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
208#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
209#else
210#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
211#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
207#endif /* IXGBE_FCOE */ 212#endif /* IXGBE_FCOE */
208struct ixgbe_ring_feature { 213struct ixgbe_ring_feature {
209 int indices; 214 int indices;
210 int mask; 215 int mask;
211} ____cacheline_internodealigned_in_smp; 216} ____cacheline_internodealigned_in_smp;
212 217
213#define MAX_RX_QUEUES 128
214#define MAX_TX_QUEUES 128
215 218
216#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ 219#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
217 ? 8 : 1) 220 ? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a446e4c7..1959ef76c962 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
1853 if (ixgbe_link_test(adapter, &data[4])) 1853 if (ixgbe_link_test(adapter, &data[4]))
1854 eth_test->flags |= ETH_TEST_FL_FAILED; 1854 eth_test->flags |= ETH_TEST_FL_FAILED;
1855 1855
1856 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1857 int i;
1858 for (i = 0; i < adapter->num_vfs; i++) {
1859 if (adapter->vfinfo[i].clear_to_send) {
1860 netdev_warn(netdev, "%s",
1861 "offline diagnostic is not "
1862 "supported when VFs are "
1863 "present\n");
1864 data[0] = 1;
1865 data[1] = 1;
1866 data[2] = 1;
1867 data[3] = 1;
1868 eth_test->flags |= ETH_TEST_FL_FAILED;
1869 clear_bit(__IXGBE_TESTING,
1870 &adapter->state);
1871 goto skip_ol_tests;
1872 }
1873 }
1874 }
1875
1856 if (if_running) 1876 if (if_running)
1857 /* indicate we're in test mode */ 1877 /* indicate we're in test mode */
1858 dev_close(netdev); 1878 dev_close(netdev);
@@ -1908,6 +1928,7 @@ skip_loopback:
1908 1928
1909 clear_bit(__IXGBE_TESTING, &adapter->state); 1929 clear_bit(__IXGBE_TESTING, &adapter->state);
1910 } 1930 }
1931skip_ol_tests:
1911 msleep_interruptible(4 * 1000); 1932 msleep_interruptible(4 * 1000);
1912} 1933}
1913 1934
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 700cfc0aa1b9..9276d5965b0d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
202 addr = sg_dma_address(sg); 202 addr = sg_dma_address(sg);
203 len = sg_dma_len(sg); 203 len = sg_dma_len(sg);
204 while (len) { 204 while (len) {
205 /* max number of buffers allowed in one DDP context */
206 if (j >= IXGBE_BUFFCNT_MAX) {
207 netif_err(adapter, drv, adapter->netdev,
208 "xid=%x:%d,%d,%d:addr=%llx "
209 "not enough descriptors\n",
210 xid, i, j, dmacount, (u64)addr);
211 goto out_noddp_free;
212 }
213
205 /* get the offset of length of current buffer */ 214 /* get the offset of length of current buffer */
206 thisoff = addr & ((dma_addr_t)bufflen - 1); 215 thisoff = addr & ((dma_addr_t)bufflen - 1);
207 thislen = min((bufflen - thisoff), len); 216 thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
227 len -= thislen; 236 len -= thislen;
228 addr += thislen; 237 addr += thislen;
229 j++; 238 j++;
230 /* max number of buffers allowed in one DDP context */
231 if (j > IXGBE_BUFFCNT_MAX) {
232 DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
233 "not enough descriptors\n",
234 xid, i, j, dmacount, (u64)addr);
235 goto out_noddp_free;
236 }
237 } 239 }
238 } 240 }
239 /* only the last buffer may have non-full bufflen */ 241 /* only the last buffer may have non-full bufflen */
240 lastsize = thisoff + thislen; 242 lastsize = thisoff + thislen;
241 243
242 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 244 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
243 fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT); 245 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
244 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 246 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
245 fcbuff |= (IXGBE_FCBUFF_VALID); 247 fcbuff |= (IXGBE_FCBUFF_VALID);
246 248
@@ -520,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
520 /* Enable L2 eth type filter for FCoE */ 522 /* Enable L2 eth type filter for FCoE */
521 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), 523 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
522 (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); 524 (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
525 /* Enable L2 eth type filter for FIP */
526 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
527 (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
523 if (adapter->ring_feature[RING_F_FCOE].indices) { 528 if (adapter->ring_feature[RING_F_FCOE].indices) {
524 /* Use multiple rx queues for FCoE by redirection table */ 529 /* Use multiple rx queues for FCoE by redirection table */
525 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { 530 for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
530 } 535 }
531 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 536 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
532 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 537 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
538 fcoe_i = f->mask;
539 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
540 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
541 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
542 IXGBE_ETQS_QUEUE_EN |
543 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
533 } else { 544 } else {
534 /* Use single rx queue for FCoE */ 545 /* Use single rx queue for FCoE */
535 fcoe_i = f->mask; 546 fcoe_i = f->mask;
@@ -539,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
539 IXGBE_ETQS_QUEUE_EN | 550 IXGBE_ETQS_QUEUE_EN |
540 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); 551 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
541 } 552 }
553 /* send FIP frames to the first FCoE queue */
554 fcoe_i = f->mask;
555 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
556 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
557 IXGBE_ETQS_QUEUE_EN |
558 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
542 559
543 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, 560 IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
544 IXGBE_FCRXCTRL_FCOELLI | 561 IXGBE_FCRXCTRL_FCOELLI |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6effa2ca157d..7216db218442 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3036,6 +3036,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3036 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 3036 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3037 msleep(1); 3037 msleep(1);
3038 ixgbe_down(adapter); 3038 ixgbe_down(adapter);
3039 /*
3040 * If SR-IOV enabled then wait a bit before bringing the adapter
3041 * back up to give the VFs time to respond to the reset. The
3042 * two second wait is based upon the watchdog timer cycle in
3043 * the VF driver.
3044 */
3045 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3046 msleep(2000);
3039 ixgbe_up(adapter); 3047 ixgbe_up(adapter);
3040 clear_bit(__IXGBE_RESETTING, &adapter->state); 3048 clear_bit(__IXGBE_RESETTING, &adapter->state);
3041} 3049}
@@ -3216,13 +3224,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3216 3224
3217 /* disable receive for all VFs and wait one second */ 3225 /* disable receive for all VFs and wait one second */
3218 if (adapter->num_vfs) { 3226 if (adapter->num_vfs) {
3219 for (i = 0 ; i < adapter->num_vfs; i++)
3220 adapter->vfinfo[i].clear_to_send = 0;
3221
3222 /* ping all the active vfs to let them know we are going down */ 3227 /* ping all the active vfs to let them know we are going down */
3223 ixgbe_ping_all_vfs(adapter); 3228 ixgbe_ping_all_vfs(adapter);
3229
3224 /* Disable all VFTE/VFRE TX/RX */ 3230 /* Disable all VFTE/VFRE TX/RX */
3225 ixgbe_disable_tx_rx(adapter); 3231 ixgbe_disable_tx_rx(adapter);
3232
3233 /* Mark all the VFs as inactive */
3234 for (i = 0 ; i < adapter->num_vfs; i++)
3235 adapter->vfinfo[i].clear_to_send = 0;
3226 } 3236 }
3227 3237
3228 /* disable receives */ 3238 /* disable receives */
@@ -5618,7 +5628,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5618 5628
5619#ifdef IXGBE_FCOE 5629#ifdef IXGBE_FCOE
5620 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5630 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5621 (skb->protocol == htons(ETH_P_FCOE))) { 5631 ((skb->protocol == htons(ETH_P_FCOE)) ||
5632 (skb->protocol == htons(ETH_P_FIP)))) {
5622 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 5633 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
5623 txq += adapter->ring_feature[RING_F_FCOE].mask; 5634 txq += adapter->ring_feature[RING_F_FCOE].mask;
5624 return txq; 5635 return txq;
@@ -5665,18 +5676,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5665 5676
5666 tx_ring = adapter->tx_ring[skb->queue_mapping]; 5677 tx_ring = adapter->tx_ring[skb->queue_mapping];
5667 5678
5668 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5669 (skb->protocol == htons(ETH_P_FCOE))) {
5670 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5671#ifdef IXGBE_FCOE 5679#ifdef IXGBE_FCOE
5680 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
5672#ifdef CONFIG_IXGBE_DCB 5681#ifdef CONFIG_IXGBE_DCB
5673 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 5682 /* for FCoE with DCB, we force the priority to what
5674 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5683 * was specified by the switch */
5675 tx_flags |= ((adapter->fcoe.up << 13) 5684 if ((skb->protocol == htons(ETH_P_FCOE)) ||
5676 << IXGBE_TX_FLAGS_VLAN_SHIFT); 5685 (skb->protocol == htons(ETH_P_FIP))) {
5677#endif 5686 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
5687 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5688 tx_flags |= ((adapter->fcoe.up << 13)
5689 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5690 }
5678#endif 5691#endif
5692 /* flag for FCoE offloads */
5693 if (skb->protocol == htons(ETH_P_FCOE))
5694 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5679 } 5695 }
5696#endif
5697
5680 /* four things can cause us to need a context descriptor */ 5698 /* four things can cause us to need a context descriptor */
5681 if (skb_is_gso(skb) || 5699 if (skb_is_gso(skb) ||
5682 (skb->ip_summed == CHECKSUM_PARTIAL) || 5700 (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -6031,7 +6049,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6031 indices += min_t(unsigned int, num_possible_cpus(), 6049 indices += min_t(unsigned int, num_possible_cpus(),
6032 IXGBE_MAX_FCOE_INDICES); 6050 IXGBE_MAX_FCOE_INDICES);
6033#endif 6051#endif
6034 indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
6035 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); 6052 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
6036 if (!netdev) { 6053 if (!netdev) {
6037 err = -ENOMEM; 6054 err = -ENOMEM;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c574d0a68f2a..aed4ed665648 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
1298#define IXGBE_ETQF_FILTER_BCN 1 1298#define IXGBE_ETQF_FILTER_BCN 1
1299#define IXGBE_ETQF_FILTER_FCOE 2 1299#define IXGBE_ETQF_FILTER_FCOE 2
1300#define IXGBE_ETQF_FILTER_1588 3 1300#define IXGBE_ETQF_FILTER_1588 3
1301#define IXGBE_ETQF_FILTER_FIP 4
1301/* VLAN Control Bit Masks */ 1302/* VLAN Control Bit Masks */
1302#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ 1303#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
1303#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ 1304#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 6ced5efc0e07..65cb133a6a1f 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2921,9 +2921,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2921 struct ixgbevf_tx_buffer *tx_buffer_info; 2921 struct ixgbevf_tx_buffer *tx_buffer_info;
2922 unsigned int len; 2922 unsigned int len;
2923 unsigned int total = skb->len; 2923 unsigned int total = skb->len;
2924 unsigned int offset = 0, size, count = 0, i; 2924 unsigned int offset = 0, size, count = 0;
2925 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2925 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2926 unsigned int f; 2926 unsigned int f;
2927 int i;
2927 2928
2928 i = tx_ring->next_to_use; 2929 i = tx_ring->next_to_use;
2929 2930
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 348769521615..097796423b52 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -6318,7 +6318,7 @@ static int netdev_set_eeprom(struct net_device *dev,
6318 int len; 6318 int len;
6319 6319
6320 if (eeprom->magic != EEPROM_MAGIC) 6320 if (eeprom->magic != EEPROM_MAGIC)
6321 return 1; 6321 return -EINVAL;
6322 6322
6323 len = (eeprom->offset + eeprom->len + 1) / 2; 6323 len = (eeprom->offset + eeprom->len + 1) / 2;
6324 for (i = eeprom->offset / 2; i < len; i++) 6324 for (i = eeprom->offset / 2; i < len; i++)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8f6e816a7395..b402a95c87c7 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1023,6 +1023,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1023 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1023 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
1024 info->port_attr.show = show_port_type; 1024 info->port_attr.show = show_port_type;
1025 info->port_attr.store = set_port_type; 1025 info->port_attr.store = set_port_type;
1026 sysfs_attr_init(&info->port_attr.attr);
1026 1027
1027 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1028 err = device_create_file(&dev->pdev->dev, &info->port_attr);
1028 if (err) { 1029 if (err) {
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 33ae5e13b608..174ac8ef82fa 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 72 56#define _NETXEN_NIC_LINUX_SUBVERSION 73
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.72" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5fc9663..f26e54716c88 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
669 } 669 }
670 sds_ring->desc_head = (struct status_desc *)addr; 670 sds_ring->desc_head = (struct status_desc *)addr;
671 671
672 sds_ring->crb_sts_consumer = 672 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
673 netxen_get_ioaddr(adapter, 673 sds_ring->crb_sts_consumer =
674 recv_crb_registers[port].crb_sts_consumer[ring]); 674 netxen_get_ioaddr(adapter,
675 recv_crb_registers[port].crb_sts_consumer[ring]);
675 676
676 sds_ring->crb_intr_mask = 677 sds_ring->crb_intr_mask =
677 netxen_get_ioaddr(adapter, 678 netxen_get_ioaddr(adapter,
678 recv_crb_registers[port].sw_int_mask[ring]); 679 recv_crb_registers[port].sw_int_mask[ring]);
680 }
679 } 681 }
680 682
681 683
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 439f3e859693..ecb6eed1d8e2 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -891,7 +891,7 @@ nx_get_bios_version(struct netxen_adapter *adapter)
891 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { 891 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
892 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) 892 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
893 + NX_UNI_BIOS_VERSION_OFF)); 893 + NX_UNI_BIOS_VERSION_OFF));
894 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + 894 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
895 (bios_ver >> 24); 895 (bios_ver >> 24);
896 } else 896 } else
897 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); 897 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f1daa9a8be07..9e82061c0235 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter)
604static int 604static int
605netxen_setup_pci_map(struct netxen_adapter *adapter) 605netxen_setup_pci_map(struct netxen_adapter *adapter)
606{ 606{
607 void __iomem *mem_ptr0 = NULL;
608 void __iomem *mem_ptr1 = NULL;
609 void __iomem *mem_ptr2 = NULL;
610 void __iomem *db_ptr = NULL; 607 void __iomem *db_ptr = NULL;
611 608
612 resource_size_t mem_base, db_base; 609 resource_size_t mem_base, db_base;
613 unsigned long mem_len, db_len = 0, pci_len0 = 0; 610 unsigned long mem_len, db_len = 0;
614 611
615 struct pci_dev *pdev = adapter->pdev; 612 struct pci_dev *pdev = adapter->pdev;
616 int pci_func = adapter->ahw.pci_func; 613 int pci_func = adapter->ahw.pci_func;
614 struct netxen_hardware_context *ahw = &adapter->ahw;
617 615
618 int err = 0; 616 int err = 0;
619 617
@@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
630 628
631 /* 128 Meg of memory */ 629 /* 128 Meg of memory */
632 if (mem_len == NETXEN_PCI_128MB_SIZE) { 630 if (mem_len == NETXEN_PCI_128MB_SIZE) {
633 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); 631
634 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, 632 ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
633 ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
635 SECOND_PAGE_GROUP_SIZE); 634 SECOND_PAGE_GROUP_SIZE);
636 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, 635 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
637 THIRD_PAGE_GROUP_SIZE); 636 THIRD_PAGE_GROUP_SIZE);
638 pci_len0 = FIRST_PAGE_GROUP_SIZE; 637 if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
638 ahw->pci_base2 == NULL) {
639 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
640 err = -EIO;
641 goto err_out;
642 }
643
644 ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
645
639 } else if (mem_len == NETXEN_PCI_32MB_SIZE) { 646 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
640 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); 647
641 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - 648 ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
649 ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
642 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); 650 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
651 if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
652 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
653 err = -EIO;
654 goto err_out;
655 }
656
643 } else if (mem_len == NETXEN_PCI_2MB_SIZE) { 657 } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
644 658
645 mem_ptr0 = pci_ioremap_bar(pdev, 0); 659 ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
646 if (mem_ptr0 == NULL) { 660 if (ahw->pci_base0 == NULL) {
647 dev_err(&pdev->dev, "failed to map PCI bar 0\n"); 661 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
648 return -EIO; 662 return -EIO;
649 } 663 }
650 pci_len0 = mem_len; 664 ahw->pci_len0 = mem_len;
651 } else { 665 } else {
652 return -EIO; 666 return -EIO;
653 } 667 }
@@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
656 670
657 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 671 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
658 672
659 adapter->ahw.pci_base0 = mem_ptr0;
660 adapter->ahw.pci_len0 = pci_len0;
661 adapter->ahw.pci_base1 = mem_ptr1;
662 adapter->ahw.pci_base2 = mem_ptr2;
663
664 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { 673 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
665 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, 674 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
666 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); 675 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@ -1253,8 +1262,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1253 int pci_func_id = PCI_FUNC(pdev->devfn); 1262 int pci_func_id = PCI_FUNC(pdev->devfn);
1254 uint8_t revision_id; 1263 uint8_t revision_id;
1255 1264
1256 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 1265 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1257 pr_warning("%s: chip revisions between 0x%x-0x%x" 1266 pr_warning("%s: chip revisions between 0x%x-0x%x "
1258 "will not be enabled.\n", 1267 "will not be enabled.\n",
1259 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); 1268 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1260 return -ENODEV; 1269 return -ENODEV;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2f5715..1028fcb91a28 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1549 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), 1549 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
1550 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), 1550 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
1551 PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), 1551 PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
1552 PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
1552 PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), 1553 PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
1553 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), 1554 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
1554 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), 1555 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1740 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), 1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), 1742 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1742 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), 1743 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1743 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1744 PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
1744 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), 1745 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
1745 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1746 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1746 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), 1747 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index f45c626003a4..ad2267646187 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
493{ 493{
494 struct net_device *dev = priv; 494 struct net_device *dev = priv;
495 cisparse_t parse; 495 cisparse_t parse;
496 u8 *buf;
496 497
497 if (pcmcia_parse_tuple(tuple, &parse)) 498 if (pcmcia_parse_tuple(tuple, &parse))
498 return -EINVAL; 499 return -EINVAL;
499 500
500 if ((parse.version_1.ns > 3) && 501 buf = parse.version_1.str + parse.version_1.ofs[3];
501 (cvt_ascii_address(dev, 502
502 (parse.version_1.str + parse.version_1.ofs[3])))) 503 if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
503 return 0; 504 return 0;
504 505
505 return -EINVAL; 506 return -EINVAL;
@@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link)
528 len = pcmcia_get_tuple(link, 0x81, &buf); 529 len = pcmcia_get_tuple(link, 0x81, &buf);
529 if (buf && len >= 13) { 530 if (buf && len >= 13) {
530 buf[12] = '\0'; 531 buf[12] = '\0';
531 if (cvt_ascii_address(dev, buf)) 532 if (cvt_ascii_address(dev, buf) == 0)
532 rc = 0; 533 rc = 0;
533 } 534 }
534 kfree(buf); 535 kfree(buf);
@@ -910,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link)
910 911
911 if (i != 0) { 912 if (i != 0) {
912 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); 913 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
913 goto config_undo; 914 goto config_failed;
914 } 915 }
915 916
916 smc->duplex = 0; 917 smc->duplex = 0;
@@ -998,6 +999,7 @@ config_undo:
998 unregister_netdev(dev); 999 unregister_netdev(dev);
999config_failed: 1000config_failed:
1000 smc91c92_release(link); 1001 smc91c92_release(link);
1002 free_netdev(dev);
1001 return -ENODEV; 1003 return -ENODEV;
1002} /* smc91c92_config */ 1004} /* smc91c92_config */
1003 1005
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 64cd250f642d..340da3915b96 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,8 +187,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
187 187
188MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); 188MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
189 189
190static int rx_copybreak = 200; 190/*
191static int use_dac = -1; 191 * we set our copybreak very high so that we don't have
192 * to allocate 16k frames all the time (see note in
193 * rtl8169_open()
194 */
195static int rx_copybreak = 16383;
196static int use_dac;
192static struct { 197static struct {
193 u32 msg_enable; 198 u32 msg_enable;
194} debug = { -1 }; 199} debug = { -1 };
@@ -513,8 +518,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
513module_param(rx_copybreak, int, 0); 518module_param(rx_copybreak, int, 0);
514MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 519MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
515module_param(use_dac, int, 0); 520module_param(use_dac, int, 0);
516MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only." 521MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
517" Unsafe on 32 bit PCI slot.");
518module_param_named(debug, debug.msg_enable, int, 0); 522module_param_named(debug, debug.msg_enable, int, 0);
519MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 523MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
520MODULE_LICENSE("GPL"); 524MODULE_LICENSE("GPL");
@@ -2837,8 +2841,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2837 spin_lock_irq(&tp->lock); 2841 spin_lock_irq(&tp->lock);
2838 2842
2839 RTL_W8(Cfg9346, Cfg9346_Unlock); 2843 RTL_W8(Cfg9346, Cfg9346_Unlock);
2840 RTL_W32(MAC0, low);
2841 RTL_W32(MAC4, high); 2844 RTL_W32(MAC4, high);
2845 RTL_W32(MAC0, low);
2842 RTL_W8(Cfg9346, Cfg9346_Lock); 2846 RTL_W8(Cfg9346, Cfg9346_Lock);
2843 2847
2844 spin_unlock_irq(&tp->lock); 2848 spin_unlock_irq(&tp->lock);
@@ -2990,7 +2994,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2990 void __iomem *ioaddr; 2994 void __iomem *ioaddr;
2991 unsigned int i; 2995 unsigned int i;
2992 int rc; 2996 int rc;
2993 int this_use_dac = use_dac;
2994 2997
2995 if (netif_msg_drv(&debug)) { 2998 if (netif_msg_drv(&debug)) {
2996 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", 2999 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3056,17 +3059,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3056 3059
3057 tp->cp_cmd = PCIMulRW | RxChkSum; 3060 tp->cp_cmd = PCIMulRW | RxChkSum;
3058 3061
3059 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3060 if (!tp->pcie_cap)
3061 netif_info(tp, probe, dev, "no PCI Express capability\n");
3062
3063 if (this_use_dac < 0)
3064 this_use_dac = tp->pcie_cap != 0;
3065
3066 if ((sizeof(dma_addr_t) > 4) && 3062 if ((sizeof(dma_addr_t) > 4) &&
3067 this_use_dac && 3063 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
3068 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3069 netif_info(tp, probe, dev, "using 64-bit DMA\n");
3070 tp->cp_cmd |= PCIDAC; 3064 tp->cp_cmd |= PCIDAC;
3071 dev->features |= NETIF_F_HIGHDMA; 3065 dev->features |= NETIF_F_HIGHDMA;
3072 } else { 3066 } else {
@@ -3085,6 +3079,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3085 goto err_out_free_res_4; 3079 goto err_out_free_res_4;
3086 } 3080 }
3087 3081
3082 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3083 if (!tp->pcie_cap)
3084 netif_info(tp, probe, dev, "no PCI Express capability\n");
3085
3088 RTL_W16(IntrMask, 0x0000); 3086 RTL_W16(IntrMask, 0x0000);
3089 3087
3090 /* Soft reset the chip. */ 3088 /* Soft reset the chip. */
@@ -3254,9 +3252,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3254} 3252}
3255 3253
3256static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 3254static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3257 struct net_device *dev) 3255 unsigned int mtu)
3258{ 3256{
3259 unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; 3257 unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3258
3259 if (max_frame != 16383)
3260 printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
3261 "NIC may lead to frame reception errors!\n");
3260 3262
3261 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; 3263 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3262} 3264}
@@ -3269,7 +3271,17 @@ static int rtl8169_open(struct net_device *dev)
3269 3271
3270 pm_runtime_get_sync(&pdev->dev); 3272 pm_runtime_get_sync(&pdev->dev);
3271 3273
3272 rtl8169_set_rxbufsize(tp, dev); 3274 /*
3275 * Note that we use a magic value here, its wierd I know
3276 * its done because, some subset of rtl8169 hardware suffers from
3277 * a problem in which frames received that are longer than
3278 * the size set in RxMaxSize register return garbage sizes
3279 * when received. To avoid this we need to turn off filtering,
3280 * which is done by setting a value of 16383 in the RxMaxSize register
3281 * and allocating 16k frames to handle the largest possible rx value
3282 * thats what the magic math below does.
3283 */
3284 rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
3273 3285
3274 /* 3286 /*
3275 * Rx and Tx desscriptors needs 256 bytes alignment. 3287 * Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3929,7 +3941,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3929 3941
3930 rtl8169_down(dev); 3942 rtl8169_down(dev);
3931 3943
3932 rtl8169_set_rxbufsize(tp, dev); 3944 rtl8169_set_rxbufsize(tp, dev->mtu);
3933 3945
3934 ret = rtl8169_init_ring(dev); 3946 ret = rtl8169_init_ring(dev);
3935 if (ret < 0) 3947 if (ret < 0)
@@ -4808,8 +4820,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
4808 mc_filter[1] = swab32(data); 4820 mc_filter[1] = swab32(data);
4809 } 4821 }
4810 4822
4811 RTL_W32(MAR0 + 0, mc_filter[0]);
4812 RTL_W32(MAR0 + 4, mc_filter[1]); 4823 RTL_W32(MAR0 + 4, mc_filter[1]);
4824 RTL_W32(MAR0 + 0, mc_filter[0]);
4813 4825
4814 RTL_W32(RxConfig, tmp); 4826 RTL_W32(RxConfig, tmp);
4815 4827
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ed999d31f1fa..37f6a00cde17 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -592,8 +592,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 /* Setup... */ 592 /* Setup... */
593 len = skb->len; 593 len = skb->len;
594 if (len < ETH_ZLEN) { 594 if (len < ETH_ZLEN) {
595 if (skb_padto(skb, ETH_ZLEN)) 595 if (skb_padto(skb, ETH_ZLEN)) {
596 spin_unlock_irqrestore(&sp->tx_lock, flags);
596 return NETDEV_TX_OK; 597 return NETDEV_TX_OK;
598 }
597 len = ETH_ZLEN; 599 len = ETH_ZLEN;
598 } 600 }
599 601
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index fb287649a305..eb63d44748a7 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -2,6 +2,7 @@ config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver" 2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 select MII 3 select MII
4 select PHYLIB 4 select PHYLIB
5 select CRC32
5 depends on NETDEVICES && CPU_SUBTYPE_ST40 6 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help 7 help
7 This is the driver for the Ethernet IPs are built around a 8 This is the driver for the Ethernet IPs are built around a
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index b79d908fe34e..7063f56640c3 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
851 851
852 if ( !(rdes0 & 0x8000) || 852 if ( !(rdes0 & 0x8000) ||
853 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { 853 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
854 struct sk_buff *new_skb = NULL;
855
854 skb = rxptr->rx_skb_ptr; 856 skb = rxptr->rx_skb_ptr;
855 857
856 /* Good packet, send to upper layer */ 858 /* Good packet, send to upper layer */
857 /* Shorst packet used new SKB */ 859 /* Shorst packet used new SKB */
858 if ( (rxlen < RX_COPY_SIZE) && 860 if ((rxlen < RX_COPY_SIZE) &&
859 ( (skb = dev_alloc_skb(rxlen + 2) ) 861 (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
860 != NULL) ) { 862 skb = new_skb;
861 /* size less than COPY_SIZE, allocate a rxlen SKB */ 863 /* size less than COPY_SIZE, allocate a rxlen SKB */
862 skb_reserve(skb, 2); /* 16byte align */ 864 skb_reserve(skb, 2); /* 16byte align */
863 memcpy(skb_put(skb, rxlen), 865 memcpy(skb_put(skb, rxlen),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 078903f10f02..616f8c92b745 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr)
812 812
813 case FLOW_CNTL_TX_RX: 813 case FLOW_CNTL_TX_RX:
814 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); 814 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); 815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
816 break; 816 break;
817 817
818 case FLOW_CNTL_DISABLE: 818 case FLOW_CNTL_DISABLE:
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 4e30197afff6..6b1cb706e410 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -94,6 +94,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
94 { USB_DEVICE(0x04bb, 0x093f) }, 94 { USB_DEVICE(0x04bb, 0x093f) },
95 /* AVM FRITZ!WLAN USB Stick N */ 95 /* AVM FRITZ!WLAN USB Stick N */
96 { USB_DEVICE(0x057C, 0x8401) }, 96 { USB_DEVICE(0x057C, 0x8401) },
97 /* NEC WL300NU-G */
98 { USB_DEVICE(0x0409, 0x0249) },
97 /* AVM FRITZ!WLAN USB Stick N 2.4 */ 99 /* AVM FRITZ!WLAN USB Stick N 2.4 */
98 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, 100 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
99 101
@@ -416,7 +418,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
416 spin_unlock_irqrestore(&aru->common.cmdlock, flags); 418 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
417 419
418 usb_fill_int_urb(urb, aru->udev, 420 usb_fill_int_urb(urb, aru->udev,
419 usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), 421 usb_sndintpipe(aru->udev, AR9170_EP_CMD),
420 aru->common.cmdbuf, plen + 4, 422 aru->common.cmdbuf, plen + 4,
421 ar9170_usb_tx_urb_complete, NULL, 1); 423 ar9170_usb_tx_urb_complete, NULL, 1);
422 424
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3949133d9ee2..3297fc7b80bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2047,16 +2047,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2047 tx_resp->failure_frame); 2047 tx_resp->failure_frame);
2048 2048
2049 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2049 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2050 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2050 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2051 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2052 2051
2053 if (priv->mac80211_registered && 2052 if (priv->mac80211_registered &&
2054 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2053 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2055 iwl_wake_queue(priv, txq_id); 2054 iwl_wake_queue(priv, txq_id);
2056 } 2055 }
2057 2056
2058 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2057 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2059 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2060 2058
2061 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2059 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2062 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); 2060 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0a376f720d78..f43a45d0f1dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1236,7 +1236,15 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1236 /* Ack/clear/reset pending uCode interrupts. 1236 /* Ack/clear/reset pending uCode interrupts.
1237 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 1237 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1238 */ 1238 */
1239 iwl_write32(priv, CSR_INT, priv->_agn.inta); 1239 /* There is a hardware bug in the interrupt mask function that some
1240 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1241 * they are disabled in the CSR_INT_MASK register. Furthermore the
1242 * ICT interrupt handling mechanism has another bug that might cause
1243 * these unmasked interrupts fail to be detected. We workaround the
1244 * hardware bugs here by ACKing all the possible interrupts so that
1245 * interrupt coalescing can still be achieved.
1246 */
1247 iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
1240 1248
1241 inta = priv->_agn.inta; 1249 inta = priv->_agn.inta;
1242 1250
@@ -2611,7 +2619,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
2611 BIT(NL80211_IFTYPE_STATION) | 2619 BIT(NL80211_IFTYPE_STATION) |
2612 BIT(NL80211_IFTYPE_ADHOC); 2620 BIT(NL80211_IFTYPE_ADHOC);
2613 2621
2614 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | 2622 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2615 WIPHY_FLAG_DISABLE_BEACON_HINTS; 2623 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2616 2624
2617 /* 2625 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4995134d7e4a..64f150b19771 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1946,7 +1946,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1946{ 1946{
1947 int i; 1947 int i;
1948 1948
1949 for (i = 0; i < IWL_RATE_COUNT; i++) { 1949 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1950 rates[i].bitrate = iwl3945_rates[i].ieee * 5; 1950 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1951 rates[i].hw_value = i; /* Rate scaling will work on indexes */ 1951 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1952 rates[i].hw_value_short = i; 1952 rates[i].hw_value_short = i;
@@ -3950,7 +3950,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3950 BIT(NL80211_IFTYPE_STATION) | 3950 BIT(NL80211_IFTYPE_STATION) |
3951 BIT(NL80211_IFTYPE_ADHOC); 3951 BIT(NL80211_IFTYPE_ADHOC);
3952 3952
3953 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | 3953 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3954 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3954 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3955 3955
3956 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3956 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4396dccd12ac..82ebe1461a77 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -172,6 +172,8 @@ int lbs_cfg_register(struct lbs_private *priv)
172 if (ret < 0) 172 if (ret < 0)
173 lbs_pr_err("cannot register wiphy device\n"); 173 lbs_pr_err("cannot register wiphy device\n");
174 174
175 priv->wiphy_registered = true;
176
175 ret = register_netdev(priv->dev); 177 ret = register_netdev(priv->dev);
176 if (ret) 178 if (ret)
177 lbs_pr_err("cannot register network device\n"); 179 lbs_pr_err("cannot register network device\n");
@@ -190,9 +192,11 @@ void lbs_cfg_free(struct lbs_private *priv)
190 if (!wdev) 192 if (!wdev)
191 return; 193 return;
192 194
193 if (wdev->wiphy) { 195 if (priv->wiphy_registered)
194 wiphy_unregister(wdev->wiphy); 196 wiphy_unregister(wdev->wiphy);
197
198 if (wdev->wiphy)
195 wiphy_free(wdev->wiphy); 199 wiphy_free(wdev->wiphy);
196 } 200
197 kfree(wdev); 201 kfree(wdev);
198} 202}
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 058d1720242e..a54880e4ad2b 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@ struct lbs_private {
36 36
37 /* CFG80211 */ 37 /* CFG80211 */
38 struct wireless_dev *wdev; 38 struct wireless_dev *wdev;
39 bool wiphy_registered;
39 40
40 /* Mesh */ 41 /* Mesh */
41 struct net_device *mesh_dev; /* Virtual device */ 42 struct net_device *mesh_dev; /* Virtual device */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 6599fd15e675..1b5d0aebbb0e 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3851,6 +3851,7 @@ MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3851MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); 3851MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
3852 3852
3853static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 3853static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3854 { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
3854 { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, 3855 { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
3855 { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, 3856 { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
3856 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, 3857 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b3c4fbd80d8d..e3cfc001d2fd 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -35,6 +35,7 @@ MODULE_FIRMWARE("isl3887usb");
35static struct usb_device_id p54u_table[] __devinitdata = { 35static struct usb_device_id p54u_table[] __devinitdata = {
36 /* Version 1 devices (pci chip + net2280) */ 36 /* Version 1 devices (pci chip + net2280) */
37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
38 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
38 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ 39 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
39 {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ 40 {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
40 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ 41 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9b04964deced..13444b6b3e37 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1643,6 +1643,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1643 unsigned int i; 1643 unsigned int i;
1644 1644
1645 /* 1645 /*
1646 * Disable powersaving as default.
1647 */
1648 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1649
1650 /*
1646 * Initialize all hw fields. 1651 * Initialize all hw fields.
1647 */ 1652 */
1648 rt2x00dev->hw->flags = 1653 rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 18d4d8e4ae6b..326fce78489d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -812,9 +812,9 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
812 rt2800_rfcsr_write(rt2x00dev, 24, 812 rt2800_rfcsr_write(rt2x00dev, 24,
813 rt2x00dev->calibration[conf_is_ht40(conf)]); 813 rt2x00dev->calibration[conf_is_ht40(conf)]);
814 814
815 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 815 rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
816 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); 816 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
817 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); 817 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
818} 818}
819 819
820static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 820static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d102dd87c9f..0b51857fbaf7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PPC) += setup-bus.o
48obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 48obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
49obj-$(CONFIG_X86_VISWS) += setup-irq.o 49obj-$(CONFIG_X86_VISWS) += setup-irq.o
50obj-$(CONFIG_MN10300) += setup-bus.o 50obj-$(CONFIG_MN10300) += setup-bus.o
51obj-$(CONFIG_MICROBLAZE) += setup-bus.o
51 52
52# 53#
53# ACPI Related PCI FW Functions 54# ACPI Related PCI FW Functions
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index de296452c957..997668558e79 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -655,8 +655,8 @@ void pci_create_legacy_files(struct pci_bus *b)
655 goto legacy_io_err; 655 goto legacy_io_err;
656 656
657 /* Allocated above after the legacy_io struct */ 657 /* Allocated above after the legacy_io struct */
658 sysfs_bin_attr_init(b->legacy_mem);
659 b->legacy_mem = b->legacy_io + 1; 658 b->legacy_mem = b->legacy_io + 1;
659 sysfs_bin_attr_init(b->legacy_mem);
660 b->legacy_mem->attr.name = "legacy_mem"; 660 b->legacy_mem->attr.name = "legacy_mem";
661 b->legacy_mem->size = 1024*1024; 661 b->legacy_mem->size = 1024*1024;
662 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 662 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index a04f21c8170f..f5da62653313 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,6 +133,7 @@ static int __devinit i82092aa_pci_probe(struct pci_dev *dev, const struct pci_de
133 sockets[i].socket.map_size = 0x1000; 133 sockets[i].socket.map_size = 0x1000;
134 sockets[i].socket.irq_mask = 0; 134 sockets[i].socket.irq_mask = 0;
135 sockets[i].socket.pci_irq = dev->irq; 135 sockets[i].socket.pci_irq = dev->irq;
136 sockets[i].socket.cb_dev = dev;
136 sockets[i].socket.owner = THIS_MODULE; 137 sockets[i].socket.owner = THIS_MODULE;
137 138
138 sockets[i].number = i; 139 sockets[i].number = i;
diff --git a/drivers/pcmcia/i82365.h b/drivers/pcmcia/i82365.h
index 849ef1b5d687..3f84d7a2dc84 100644
--- a/drivers/pcmcia/i82365.h
+++ b/drivers/pcmcia/i82365.h
@@ -95,6 +95,7 @@
95#define I365_CSC_DETECT 0x08 95#define I365_CSC_DETECT 0x08
96#define I365_CSC_ANY 0x0F 96#define I365_CSC_ANY 0x0F
97#define I365_CSC_GPI 0x10 97#define I365_CSC_GPI 0x10
98#define I365_CSC_IRQ_MASK 0xF0
98 99
99/* Flags for I365_ADDRWIN */ 100/* Flags for I365_ADDRWIN */
100#define I365_ENA_IO(map) (0x40 << (map)) 101#define I365_ENA_IO(map) (0x40 << (map))
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index b2df04199a21..c4612c52e4cb 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -256,6 +256,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
256{ 256{
257 struct pcmcia_socket *s; 257 struct pcmcia_socket *s;
258 config_t *c; 258 config_t *c;
259 int ret;
259 260
260 s = p_dev->socket; 261 s = p_dev->socket;
261 262
@@ -264,13 +265,13 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
264 265
265 if (!(s->state & SOCKET_PRESENT)) { 266 if (!(s->state & SOCKET_PRESENT)) {
266 dev_dbg(&s->dev, "No card present\n"); 267 dev_dbg(&s->dev, "No card present\n");
267 mutex_unlock(&s->ops_mutex); 268 ret = -ENODEV;
268 return -ENODEV; 269 goto unlock;
269 } 270 }
270 if (!(c->state & CONFIG_LOCKED)) { 271 if (!(c->state & CONFIG_LOCKED)) {
271 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 272 dev_dbg(&s->dev, "Configuration isnt't locked\n");
272 mutex_unlock(&s->ops_mutex); 273 ret = -EACCES;
273 return -EACCES; 274 goto unlock;
274 } 275 }
275 276
276 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { 277 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
@@ -286,7 +287,8 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
286 287
287 if (mod->Attributes & CONF_VCC_CHANGE_VALID) { 288 if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
288 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 289 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
289 return -EINVAL; 290 ret = -EINVAL;
291 goto unlock;
290 } 292 }
291 293
292 /* We only allow changing Vpp1 and Vpp2 to the same value */ 294 /* We only allow changing Vpp1 and Vpp2 to the same value */
@@ -294,21 +296,21 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
294 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 296 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
295 if (mod->Vpp1 != mod->Vpp2) { 297 if (mod->Vpp1 != mod->Vpp2) {
296 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 298 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
297 mutex_unlock(&s->ops_mutex); 299 ret = -EINVAL;
298 return -EINVAL; 300 goto unlock;
299 } 301 }
300 s->socket.Vpp = mod->Vpp1; 302 s->socket.Vpp = mod->Vpp1;
301 if (s->ops->set_socket(s, &s->socket)) { 303 if (s->ops->set_socket(s, &s->socket)) {
302 mutex_unlock(&s->ops_mutex);
303 dev_printk(KERN_WARNING, &s->dev, 304 dev_printk(KERN_WARNING, &s->dev,
304 "Unable to set VPP\n"); 305 "Unable to set VPP\n");
305 return -EIO; 306 ret = -EIO;
307 goto unlock;
306 } 308 }
307 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 309 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
308 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 310 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
309 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 311 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
310 mutex_unlock(&s->ops_mutex); 312 ret = -EINVAL;
311 return -EINVAL; 313 goto unlock;
312 } 314 }
313 315
314 if (mod->Attributes & CONF_IO_CHANGE_WIDTH) { 316 if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
@@ -332,9 +334,11 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
332 s->ops->set_io_map(s, &io_on); 334 s->ops->set_io_map(s, &io_on);
333 } 335 }
334 } 336 }
337 ret = 0;
338unlock:
335 mutex_unlock(&s->ops_mutex); 339 mutex_unlock(&s->ops_mutex);
336 340
337 return 0; 341 return ret;
338} /* modify_configuration */ 342} /* modify_configuration */
339EXPORT_SYMBOL(pcmcia_modify_configuration); 343EXPORT_SYMBOL(pcmcia_modify_configuration);
340 344
@@ -752,14 +756,6 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
752 756
753#ifdef CONFIG_PCMCIA_PROBE 757#ifdef CONFIG_PCMCIA_PROBE
754 758
755#ifdef IRQ_NOAUTOEN
756 /* if the underlying IRQ infrastructure allows for it, only allocate
757 * the IRQ, but do not enable it
758 */
759 if (!(req->Handler))
760 type |= IRQ_NOAUTOEN;
761#endif /* IRQ_NOAUTOEN */
762
763 if (s->irq.AssignedIRQ != 0) { 759 if (s->irq.AssignedIRQ != 0) {
764 /* If the interrupt is already assigned, it must be the same */ 760 /* If the interrupt is already assigned, it must be the same */
765 irq = s->irq.AssignedIRQ; 761 irq = s->irq.AssignedIRQ;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 7c204910a777..7ba57a565cd7 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -671,6 +671,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
671 socket[i].socket.map_size = 0x1000; 671 socket[i].socket.map_size = 0x1000;
672 socket[i].socket.irq_mask = mask; 672 socket[i].socket.irq_mask = mask;
673 socket[i].socket.pci_irq = dev->irq; 673 socket[i].socket.pci_irq = dev->irq;
674 socket[i].socket.cb_dev = dev;
674 socket[i].socket.owner = THIS_MODULE; 675 socket[i].socket.owner = THIS_MODULE;
675 676
676 socket[i].number = i; 677 socket[i].number = i;
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index aaa70227bfb0..9ffa97d0b16c 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -296,7 +296,7 @@ static int ti_init(struct yenta_socket *socket)
296 u8 new, reg = exca_readb(socket, I365_INTCTL); 296 u8 new, reg = exca_readb(socket, I365_INTCTL);
297 297
298 new = reg & ~I365_INTR_ENA; 298 new = reg & ~I365_INTR_ENA;
299 if (socket->cb_irq) 299 if (socket->dev->irq)
300 new |= I365_INTR_ENA; 300 new |= I365_INTR_ENA;
301 if (new != reg) 301 if (new != reg)
302 exca_writeb(socket, I365_INTCTL, new); 302 exca_writeb(socket, I365_INTCTL, new);
@@ -316,14 +316,47 @@ static int ti_override(struct yenta_socket *socket)
316 return 0; 316 return 0;
317} 317}
318 318
319static void ti113x_use_isa_irq(struct yenta_socket *socket)
320{
321 int isa_irq = -1;
322 u8 intctl;
323 u32 isa_irq_mask = 0;
324
325 if (!isa_probe)
326 return;
327
328 /* get a free isa int */
329 isa_irq_mask = yenta_probe_irq(socket, isa_interrupts);
330 if (!isa_irq_mask)
331 return; /* no useable isa irq found */
332
333 /* choose highest available */
334 for (; isa_irq_mask; isa_irq++)
335 isa_irq_mask >>= 1;
336 socket->cb_irq = isa_irq;
337
338 exca_writeb(socket, I365_CSCINT, (isa_irq << 4));
339
340 intctl = exca_readb(socket, I365_INTCTL);
341 intctl &= ~(I365_INTR_ENA | I365_IRQ_MASK); /* CSC Enable */
342 exca_writeb(socket, I365_INTCTL, intctl);
343
344 dev_info(&socket->dev->dev,
345 "Yenta TI113x: using isa irq %d for CardBus\n", isa_irq);
346}
347
348
319static int ti113x_override(struct yenta_socket *socket) 349static int ti113x_override(struct yenta_socket *socket)
320{ 350{
321 u8 cardctl; 351 u8 cardctl;
322 352
323 cardctl = config_readb(socket, TI113X_CARD_CONTROL); 353 cardctl = config_readb(socket, TI113X_CARD_CONTROL);
324 cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC); 354 cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC);
325 if (socket->cb_irq) 355 if (socket->dev->irq)
326 cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC | TI113X_CCR_PCI_IREQ; 356 cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC | TI113X_CCR_PCI_IREQ;
357 else
358 ti113x_use_isa_irq(socket);
359
327 config_writeb(socket, TI113X_CARD_CONTROL, cardctl); 360 config_writeb(socket, TI113X_CARD_CONTROL, cardctl);
328 361
329 return ti_override(socket); 362 return ti_override(socket);
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index c9fcbdc164ea..aaccdb9f4ba1 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -105,6 +105,7 @@ typedef struct vrc4171_socket {
105 char name[24]; 105 char name[24];
106 int csc_irq; 106 int csc_irq;
107 int io_irq; 107 int io_irq;
108 spinlock_t lock;
108} vrc4171_socket_t; 109} vrc4171_socket_t;
109 110
110static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS]; 111static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS];
@@ -327,7 +328,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
327 slot = sock->sock; 328 slot = sock->sock;
328 socket = &vrc4171_sockets[slot]; 329 socket = &vrc4171_sockets[slot];
329 330
330 spin_lock_irq(&sock->lock); 331 spin_lock_irq(&socket->lock);
331 332
332 voltage = set_Vcc_value(state->Vcc); 333 voltage = set_Vcc_value(state->Vcc);
333 exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage); 334 exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage);
@@ -370,7 +371,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
370 cscint |= I365_CSC_DETECT; 371 cscint |= I365_CSC_DETECT;
371 exca_write_byte(slot, I365_CSCINT, cscint); 372 exca_write_byte(slot, I365_CSCINT, cscint);
372 373
373 spin_unlock_irq(&sock->lock); 374 spin_unlock_irq(&socket->lock);
374 375
375 return 0; 376 return 0;
376} 377}
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 967c766f53ba..418988ab6edf 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -42,6 +42,18 @@ module_param_string(o2_speedup, o2_speedup, sizeof(o2_speedup), 0444);
42MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' " 42MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' "
43 "or 'default' (uses recommended behaviour for the detected bridge)"); 43 "or 'default' (uses recommended behaviour for the detected bridge)");
44 44
45/*
46 * Only probe "regular" interrupts, don't
47 * touch dangerous spots like the mouse irq,
48 * because there are mice that apparently
49 * get really confused if they get fondled
50 * too intimately.
51 *
52 * Default to 11, 10, 9, 7, 6, 5, 4, 3.
53 */
54static u32 isa_interrupts = 0x0ef8;
55
56
45#define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args) 57#define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args)
46 58
47/* Don't ask.. */ 59/* Don't ask.. */
@@ -54,6 +66,8 @@ MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' "
54 */ 66 */
55#ifdef CONFIG_YENTA_TI 67#ifdef CONFIG_YENTA_TI
56static int yenta_probe_cb_irq(struct yenta_socket *socket); 68static int yenta_probe_cb_irq(struct yenta_socket *socket);
69static unsigned int yenta_probe_irq(struct yenta_socket *socket,
70 u32 isa_irq_mask);
57#endif 71#endif
58 72
59 73
@@ -329,8 +343,8 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
329 /* ISA interrupt control? */ 343 /* ISA interrupt control? */
330 intr = exca_readb(socket, I365_INTCTL); 344 intr = exca_readb(socket, I365_INTCTL);
331 intr = (intr & ~0xf); 345 intr = (intr & ~0xf);
332 if (!socket->cb_irq) { 346 if (!socket->dev->irq) {
333 intr |= state->io_irq; 347 intr |= socket->cb_irq ? socket->cb_irq : state->io_irq;
334 bridge |= CB_BRIDGE_INTR; 348 bridge |= CB_BRIDGE_INTR;
335 } 349 }
336 exca_writeb(socket, I365_INTCTL, intr); 350 exca_writeb(socket, I365_INTCTL, intr);
@@ -340,7 +354,7 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
340 reg = exca_readb(socket, I365_INTCTL) & (I365_RING_ENA | I365_INTR_ENA); 354 reg = exca_readb(socket, I365_INTCTL) & (I365_RING_ENA | I365_INTR_ENA);
341 reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET; 355 reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET;
342 reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0; 356 reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0;
343 if (state->io_irq != socket->cb_irq) { 357 if (state->io_irq != socket->dev->irq) {
344 reg |= state->io_irq; 358 reg |= state->io_irq;
345 bridge |= CB_BRIDGE_INTR; 359 bridge |= CB_BRIDGE_INTR;
346 } 360 }
@@ -356,7 +370,9 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
356 exca_writeb(socket, I365_POWER, reg); 370 exca_writeb(socket, I365_POWER, reg);
357 371
358 /* CSC interrupt: no ISA irq for CSC */ 372 /* CSC interrupt: no ISA irq for CSC */
359 reg = I365_CSC_DETECT; 373 reg = exca_readb(socket, I365_CSCINT);
374 reg &= I365_CSC_IRQ_MASK;
375 reg |= I365_CSC_DETECT;
360 if (state->flags & SS_IOCARD) { 376 if (state->flags & SS_IOCARD) {
361 if (state->csc_mask & SS_STSCHG) 377 if (state->csc_mask & SS_STSCHG)
362 reg |= I365_CSC_STSCHG; 378 reg |= I365_CSC_STSCHG;
@@ -896,22 +912,12 @@ static struct cardbus_type cardbus_type[] = {
896}; 912};
897 913
898 914
899/*
900 * Only probe "regular" interrupts, don't
901 * touch dangerous spots like the mouse irq,
902 * because there are mice that apparently
903 * get really confused if they get fondled
904 * too intimately.
905 *
906 * Default to 11, 10, 9, 7, 6, 5, 4, 3.
907 */
908static u32 isa_interrupts = 0x0ef8;
909
910static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mask) 915static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mask)
911{ 916{
912 int i; 917 int i;
913 unsigned long val; 918 unsigned long val;
914 u32 mask; 919 u32 mask;
920 u8 reg;
915 921
916 /* 922 /*
917 * Probe for usable interrupts using the force 923 * Probe for usable interrupts using the force
@@ -919,6 +925,7 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
919 */ 925 */
920 cb_writel(socket, CB_SOCKET_EVENT, -1); 926 cb_writel(socket, CB_SOCKET_EVENT, -1);
921 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); 927 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
928 reg = exca_readb(socket, I365_CSCINT);
922 exca_writeb(socket, I365_CSCINT, 0); 929 exca_writeb(socket, I365_CSCINT, 0);
923 val = probe_irq_on() & isa_irq_mask; 930 val = probe_irq_on() & isa_irq_mask;
924 for (i = 1; i < 16; i++) { 931 for (i = 1; i < 16; i++) {
@@ -930,7 +937,7 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
930 cb_writel(socket, CB_SOCKET_EVENT, -1); 937 cb_writel(socket, CB_SOCKET_EVENT, -1);
931 } 938 }
932 cb_writel(socket, CB_SOCKET_MASK, 0); 939 cb_writel(socket, CB_SOCKET_MASK, 0);
933 exca_writeb(socket, I365_CSCINT, 0); 940 exca_writeb(socket, I365_CSCINT, reg);
934 941
935 mask = probe_irq_mask(val) & 0xffff; 942 mask = probe_irq_mask(val) & 0xffff;
936 943
@@ -967,6 +974,8 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
967/* probes the PCI interrupt, use only on override functions */ 974/* probes the PCI interrupt, use only on override functions */
968static int yenta_probe_cb_irq(struct yenta_socket *socket) 975static int yenta_probe_cb_irq(struct yenta_socket *socket)
969{ 976{
977 u8 reg;
978
970 if (!socket->cb_irq) 979 if (!socket->cb_irq)
971 return -1; 980 return -1;
972 981
@@ -979,7 +988,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
979 } 988 }
980 989
981 /* generate interrupt, wait */ 990 /* generate interrupt, wait */
982 exca_writeb(socket, I365_CSCINT, I365_CSC_STSCHG); 991 reg = exca_readb(socket, I365_CSCINT);
992 exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
983 cb_writel(socket, CB_SOCKET_EVENT, -1); 993 cb_writel(socket, CB_SOCKET_EVENT, -1);
984 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); 994 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
985 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS); 995 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
@@ -988,7 +998,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
988 998
989 /* disable interrupts */ 999 /* disable interrupts */
990 cb_writel(socket, CB_SOCKET_MASK, 0); 1000 cb_writel(socket, CB_SOCKET_MASK, 0);
991 exca_writeb(socket, I365_CSCINT, 0); 1001 exca_writeb(socket, I365_CSCINT, reg);
992 cb_writel(socket, CB_SOCKET_EVENT, -1); 1002 cb_writel(socket, CB_SOCKET_EVENT, -1);
993 exca_readb(socket, I365_CSC); 1003 exca_readb(socket, I365_CSC);
994 1004
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 226b3e93498c..cbca40aa4006 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -922,9 +922,13 @@ static struct backlight_ops acer_bl_ops = {
922 922
923static int __devinit acer_backlight_init(struct device *dev) 923static int __devinit acer_backlight_init(struct device *dev)
924{ 924{
925 struct backlight_properties props;
925 struct backlight_device *bd; 926 struct backlight_device *bd;
926 927
927 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops); 928 memset(&props, 0, sizeof(struct backlight_properties));
929 props.max_brightness = max_brightness;
930 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
931 &props);
928 if (IS_ERR(bd)) { 932 if (IS_ERR(bd)) {
929 printk(ACER_ERR "Could not register Acer backlight device\n"); 933 printk(ACER_ERR "Could not register Acer backlight device\n");
930 acer_backlight_device = NULL; 934 acer_backlight_device = NULL;
@@ -935,7 +939,6 @@ static int __devinit acer_backlight_init(struct device *dev)
935 939
936 bd->props.power = FB_BLANK_UNBLANK; 940 bd->props.power = FB_BLANK_UNBLANK;
937 bd->props.brightness = read_brightness(bd); 941 bd->props.brightness = read_brightness(bd);
938 bd->props.max_brightness = max_brightness;
939 backlight_update_status(bd); 942 backlight_update_status(bd);
940 return 0; 943 return 0;
941} 944}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 791fcf321506..db5f7db2ba33 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -639,12 +639,16 @@ static int asus_backlight_init(struct asus_laptop *asus)
639{ 639{
640 struct backlight_device *bd; 640 struct backlight_device *bd;
641 struct device *dev = &asus->platform_device->dev; 641 struct device *dev = &asus->platform_device->dev;
642 struct backlight_properties props;
642 643
643 if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) && 644 if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
644 !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) && 645 !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
645 lcd_switch_handle) { 646 lcd_switch_handle) {
647 memset(&props, 0, sizeof(struct backlight_properties));
648 props.max_brightness = 15;
649
646 bd = backlight_device_register(ASUS_LAPTOP_FILE, dev, 650 bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
647 asus, &asusbl_ops); 651 asus, &asusbl_ops, &props);
648 if (IS_ERR(bd)) { 652 if (IS_ERR(bd)) {
649 pr_err("Could not register asus backlight device\n"); 653 pr_err("Could not register asus backlight device\n");
650 asus->backlight_device = NULL; 654 asus->backlight_device = NULL;
@@ -653,7 +657,6 @@ static int asus_backlight_init(struct asus_laptop *asus)
653 657
654 asus->backlight_device = bd; 658 asus->backlight_device = bd;
655 659
656 bd->props.max_brightness = 15;
657 bd->props.power = FB_BLANK_UNBLANK; 660 bd->props.power = FB_BLANK_UNBLANK;
658 bd->props.brightness = asus_read_brightness(bd); 661 bd->props.brightness = asus_read_brightness(bd);
659 backlight_update_status(bd); 662 backlight_update_status(bd);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 1381430e1105..ee520357abaa 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1481,6 +1481,7 @@ static void asus_acpi_exit(void)
1481 1481
1482static int __init asus_acpi_init(void) 1482static int __init asus_acpi_init(void)
1483{ 1483{
1484 struct backlight_properties props;
1484 int result; 1485 int result;
1485 1486
1486 result = acpi_bus_register_driver(&asus_hotk_driver); 1487 result = acpi_bus_register_driver(&asus_hotk_driver);
@@ -1507,15 +1508,17 @@ static int __init asus_acpi_init(void)
1507 return -ENODEV; 1508 return -ENODEV;
1508 } 1509 }
1509 1510
1511 memset(&props, 0, sizeof(struct backlight_properties));
1512 props.max_brightness = 15;
1510 asus_backlight_device = backlight_device_register("asus", NULL, NULL, 1513 asus_backlight_device = backlight_device_register("asus", NULL, NULL,
1511 &asus_backlight_data); 1514 &asus_backlight_data,
1515 &props);
1512 if (IS_ERR(asus_backlight_device)) { 1516 if (IS_ERR(asus_backlight_device)) {
1513 printk(KERN_ERR "Could not register asus backlight device\n"); 1517 printk(KERN_ERR "Could not register asus backlight device\n");
1514 asus_backlight_device = NULL; 1518 asus_backlight_device = NULL;
1515 asus_acpi_exit(); 1519 asus_acpi_exit();
1516 return -ENODEV; 1520 return -ENODEV;
1517 } 1521 }
1518 asus_backlight_device->props.max_brightness = 15;
1519 1522
1520 return 0; 1523 return 0;
1521} 1524}
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 035a7dd65a3f..c696cf1c2616 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -455,18 +455,22 @@ static int cmpc_bl_update_status(struct backlight_device *bd)
455 return -1; 455 return -1;
456} 456}
457 457
458static struct backlight_ops cmpc_bl_ops = { 458static const struct backlight_ops cmpc_bl_ops = {
459 .get_brightness = cmpc_bl_get_brightness, 459 .get_brightness = cmpc_bl_get_brightness,
460 .update_status = cmpc_bl_update_status 460 .update_status = cmpc_bl_update_status
461}; 461};
462 462
463static int cmpc_bl_add(struct acpi_device *acpi) 463static int cmpc_bl_add(struct acpi_device *acpi)
464{ 464{
465 struct backlight_properties props;
465 struct backlight_device *bd; 466 struct backlight_device *bd;
466 467
467 bd = backlight_device_register("cmpc_bl", &acpi->dev, 468 memset(&props, 0, sizeof(struct backlight_properties));
468 acpi->handle, &cmpc_bl_ops); 469 props.max_brightness = 7;
469 bd->props.max_brightness = 7; 470 bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle,
471 &cmpc_bl_ops, &props);
472 if (IS_ERR(bd))
473 return PTR_ERR(bd);
470 dev_set_drvdata(&acpi->dev, bd); 474 dev_set_drvdata(&acpi->dev, bd);
471 return 0; 475 return 0;
472} 476}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 2740b40aad9b..71ff1545a93e 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -291,12 +291,15 @@ static int __init compal_init(void)
291 /* Register backlight stuff */ 291 /* Register backlight stuff */
292 292
293 if (!acpi_video_backlight_support()) { 293 if (!acpi_video_backlight_support()) {
294 compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, 294 struct backlight_properties props;
295 &compalbl_ops); 295 memset(&props, 0, sizeof(struct backlight_properties));
296 props.max_brightness = COMPAL_LCD_LEVEL_MAX - 1;
297 compalbl_device = backlight_device_register("compal-laptop",
298 NULL, NULL,
299 &compalbl_ops,
300 &props);
296 if (IS_ERR(compalbl_device)) 301 if (IS_ERR(compalbl_device))
297 return PTR_ERR(compalbl_device); 302 return PTR_ERR(compalbl_device);
298
299 compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1;
300 } 303 }
301 304
302 ret = platform_driver_register(&compal_driver); 305 ret = platform_driver_register(&compal_driver);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ef614979afe9..46435ac4684f 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -559,10 +559,14 @@ static int __init dell_init(void)
559 release_buffer(); 559 release_buffer();
560 560
561 if (max_intensity) { 561 if (max_intensity) {
562 dell_backlight_device = backlight_device_register( 562 struct backlight_properties props;
563 "dell_backlight", 563 memset(&props, 0, sizeof(struct backlight_properties));
564 &platform_device->dev, NULL, 564 props.max_brightness = max_intensity;
565 &dell_ops); 565 dell_backlight_device = backlight_device_register("dell_backlight",
566 &platform_device->dev,
567 NULL,
568 &dell_ops,
569 &props);
566 570
567 if (IS_ERR(dell_backlight_device)) { 571 if (IS_ERR(dell_backlight_device)) {
568 ret = PTR_ERR(dell_backlight_device); 572 ret = PTR_ERR(dell_backlight_device);
@@ -570,7 +574,6 @@ static int __init dell_init(void)
570 goto fail_backlight; 574 goto fail_backlight;
571 } 575 }
572 576
573 dell_backlight_device->props.max_brightness = max_intensity;
574 dell_backlight_device->props.brightness = 577 dell_backlight_device->props.brightness =
575 dell_get_intensity(dell_backlight_device); 578 dell_get_intensity(dell_backlight_device);
576 backlight_update_status(dell_backlight_device); 579 backlight_update_status(dell_backlight_device);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 9a844caa3756..3fdf21e0052e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1131,18 +1131,20 @@ static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
1131 1131
1132static int eeepc_backlight_init(struct eeepc_laptop *eeepc) 1132static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
1133{ 1133{
1134 struct backlight_properties props;
1134 struct backlight_device *bd; 1135 struct backlight_device *bd;
1135 1136
1137 memset(&props, 0, sizeof(struct backlight_properties));
1138 props.max_brightness = 15;
1136 bd = backlight_device_register(EEEPC_LAPTOP_FILE, 1139 bd = backlight_device_register(EEEPC_LAPTOP_FILE,
1137 &eeepc->platform_device->dev, 1140 &eeepc->platform_device->dev, eeepc,
1138 eeepc, &eeepcbl_ops); 1141 &eeepcbl_ops, &props);
1139 if (IS_ERR(bd)) { 1142 if (IS_ERR(bd)) {
1140 pr_err("Could not register eeepc backlight device\n"); 1143 pr_err("Could not register eeepc backlight device\n");
1141 eeepc->backlight_device = NULL; 1144 eeepc->backlight_device = NULL;
1142 return PTR_ERR(bd); 1145 return PTR_ERR(bd);
1143 } 1146 }
1144 eeepc->backlight_device = bd; 1147 eeepc->backlight_device = bd;
1145 bd->props.max_brightness = 15;
1146 bd->props.brightness = read_brightness(bd); 1148 bd->props.brightness = read_brightness(bd);
1147 bd->props.power = FB_BLANK_UNBLANK; 1149 bd->props.power = FB_BLANK_UNBLANK;
1148 backlight_update_status(bd); 1150 backlight_update_status(bd);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 5f3320d468f6..c1074b32490e 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1126,16 +1126,20 @@ static int __init fujitsu_init(void)
1126 /* Register backlight stuff */ 1126 /* Register backlight stuff */
1127 1127
1128 if (!acpi_video_backlight_support()) { 1128 if (!acpi_video_backlight_support()) {
1129 fujitsu->bl_device = 1129 struct backlight_properties props;
1130 backlight_device_register("fujitsu-laptop", NULL, NULL, 1130
1131 &fujitsubl_ops); 1131 memset(&props, 0, sizeof(struct backlight_properties));
1132 max_brightness = fujitsu->max_brightness;
1133 props.max_brightness = max_brightness - 1;
1134 fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
1135 NULL, NULL,
1136 &fujitsubl_ops,
1137 &props);
1132 if (IS_ERR(fujitsu->bl_device)) { 1138 if (IS_ERR(fujitsu->bl_device)) {
1133 ret = PTR_ERR(fujitsu->bl_device); 1139 ret = PTR_ERR(fujitsu->bl_device);
1134 fujitsu->bl_device = NULL; 1140 fujitsu->bl_device = NULL;
1135 goto fail_sysfs_group; 1141 goto fail_sysfs_group;
1136 } 1142 }
1137 max_brightness = fujitsu->max_brightness;
1138 fujitsu->bl_device->props.max_brightness = max_brightness - 1;
1139 fujitsu->bl_device->props.brightness = fujitsu->brightness_level; 1143 fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
1140 } 1144 }
1141 1145
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index c2b05da4289a..996223a7c009 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -683,11 +683,14 @@ static int __init msi_init(void)
683 printk(KERN_INFO "MSI: Brightness ignored, must be controlled " 683 printk(KERN_INFO "MSI: Brightness ignored, must be controlled "
684 "by ACPI video driver\n"); 684 "by ACPI video driver\n");
685 } else { 685 } else {
686 struct backlight_properties props;
687 memset(&props, 0, sizeof(struct backlight_properties));
688 props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
686 msibl_device = backlight_device_register("msi-laptop-bl", NULL, 689 msibl_device = backlight_device_register("msi-laptop-bl", NULL,
687 NULL, &msibl_ops); 690 NULL, &msibl_ops,
691 &props);
688 if (IS_ERR(msibl_device)) 692 if (IS_ERR(msibl_device))
689 return PTR_ERR(msibl_device); 693 return PTR_ERR(msibl_device);
690 msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1;
691 } 694 }
692 695
693 ret = platform_driver_register(&msipf_driver); 696 ret = platform_driver_register(&msipf_driver);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index f5f70d4c6913..367caaae2f3c 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -138,7 +138,7 @@ static int bl_set_status(struct backlight_device *bd)
138 return msi_wmi_set_block(0, backlight_map[bright]); 138 return msi_wmi_set_block(0, backlight_map[bright]);
139} 139}
140 140
141static struct backlight_ops msi_backlight_ops = { 141static const struct backlight_ops msi_backlight_ops = {
142 .get_brightness = bl_get, 142 .get_brightness = bl_get,
143 .update_status = bl_set_status, 143 .update_status = bl_set_status,
144}; 144};
@@ -249,12 +249,17 @@ static int __init msi_wmi_init(void)
249 goto err_uninstall_notifier; 249 goto err_uninstall_notifier;
250 250
251 if (!acpi_video_backlight_support()) { 251 if (!acpi_video_backlight_support()) {
252 backlight = backlight_device_register(DRV_NAME, 252 struct backlight_properties props;
253 NULL, NULL, &msi_backlight_ops); 253 memset(&props, 0, sizeof(struct backlight_properties));
254 if (IS_ERR(backlight)) 254 props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
255 backlight = backlight_device_register(DRV_NAME, NULL, NULL,
256 &msi_backlight_ops,
257 &props);
258 if (IS_ERR(backlight)) {
259 err = PTR_ERR(backlight);
255 goto err_free_input; 260 goto err_free_input;
261 }
256 262
257 backlight->props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
258 err = bl_get(NULL); 263 err = bl_get(NULL);
259 if (err < 0) 264 if (err < 0)
260 goto err_free_backlight; 265 goto err_free_backlight;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index c9fc479fc290..726f02affcb6 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
352 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright); 352 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
353} 353}
354 354
355static struct backlight_ops pcc_backlight_ops = { 355static const struct backlight_ops pcc_backlight_ops = {
356 .get_brightness = bl_get, 356 .get_brightness = bl_get,
357 .update_status = bl_set_status, 357 .update_status = bl_set_status,
358}; 358};
@@ -600,6 +600,7 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device)
600 600
601static int acpi_pcc_hotkey_add(struct acpi_device *device) 601static int acpi_pcc_hotkey_add(struct acpi_device *device)
602{ 602{
603 struct backlight_properties props;
603 struct pcc_acpi *pcc; 604 struct pcc_acpi *pcc;
604 int num_sifr, result; 605 int num_sifr, result;
605 606
@@ -637,24 +638,25 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
637 if (result) { 638 if (result) {
638 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 639 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
639 "Error installing keyinput handler\n")); 640 "Error installing keyinput handler\n"));
640 goto out_sinf; 641 goto out_hotkey;
641 } 642 }
642 643
643 /* initialize backlight */
644 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
645 &pcc_backlight_ops);
646 if (IS_ERR(pcc->backlight))
647 goto out_input;
648
649 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { 644 if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
650 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 645 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
651 "Couldn't retrieve BIOS data\n")); 646 "Couldn't retrieve BIOS data\n"));
652 goto out_backlight; 647 goto out_input;
648 }
649 /* initialize backlight */
650 memset(&props, 0, sizeof(struct backlight_properties));
651 props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
652 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
653 &pcc_backlight_ops, &props);
654 if (IS_ERR(pcc->backlight)) {
655 result = PTR_ERR(pcc->backlight);
656 goto out_sinf;
653 } 657 }
654 658
655 /* read the initial brightness setting from the hardware */ 659 /* read the initial brightness setting from the hardware */
656 pcc->backlight->props.max_brightness =
657 pcc->sinf[SINF_AC_MAX_BRIGHT];
658 pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; 660 pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
659 661
660 /* read the initial sticky key mode from the hardware */ 662 /* read the initial sticky key mode from the hardware */
@@ -669,12 +671,12 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
669 671
670out_backlight: 672out_backlight:
671 backlight_device_unregister(pcc->backlight); 673 backlight_device_unregister(pcc->backlight);
674out_sinf:
675 kfree(pcc->sinf);
672out_input: 676out_input:
673 input_unregister_device(pcc->input_dev); 677 input_unregister_device(pcc->input_dev);
674 /* no need to input_free_device() since core input API refcount and 678 /* no need to input_free_device() since core input API refcount and
675 * free()s the device */ 679 * free()s the device */
676out_sinf:
677 kfree(pcc->sinf);
678out_hotkey: 680out_hotkey:
679 kfree(pcc); 681 kfree(pcc);
680 682
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5a3d8514c66d..6553b91caaa4 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1291,9 +1291,13 @@ static int sony_nc_add(struct acpi_device *device)
1291 "controlled by ACPI video driver\n"); 1291 "controlled by ACPI video driver\n");
1292 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1292 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1293 &handle))) { 1293 &handle))) {
1294 struct backlight_properties props;
1295 memset(&props, 0, sizeof(struct backlight_properties));
1296 props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
1294 sony_backlight_device = backlight_device_register("sony", NULL, 1297 sony_backlight_device = backlight_device_register("sony", NULL,
1295 NULL, 1298 NULL,
1296 &sony_backlight_ops); 1299 &sony_backlight_ops,
1300 &props);
1297 1301
1298 if (IS_ERR(sony_backlight_device)) { 1302 if (IS_ERR(sony_backlight_device)) {
1299 printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); 1303 printk(KERN_WARNING DRV_PFX "unable to register backlight device\n");
@@ -1302,8 +1306,6 @@ static int sony_nc_add(struct acpi_device *device)
1302 sony_backlight_device->props.brightness = 1306 sony_backlight_device->props.brightness =
1303 sony_backlight_get_brightness 1307 sony_backlight_get_brightness
1304 (sony_backlight_device); 1308 (sony_backlight_device);
1305 sony_backlight_device->props.max_brightness =
1306 SONY_MAX_BRIGHTNESS - 1;
1307 } 1309 }
1308 1310
1309 } 1311 }
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c64e3528889b..770b85327f84 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6170,6 +6170,7 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
6170 6170
6171static int __init brightness_init(struct ibm_init_struct *iibm) 6171static int __init brightness_init(struct ibm_init_struct *iibm)
6172{ 6172{
6173 struct backlight_properties props;
6173 int b; 6174 int b;
6174 unsigned long quirks; 6175 unsigned long quirks;
6175 6176
@@ -6259,9 +6260,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6259 printk(TPACPI_INFO 6260 printk(TPACPI_INFO
6260 "detected a 16-level brightness capable ThinkPad\n"); 6261 "detected a 16-level brightness capable ThinkPad\n");
6261 6262
6262 ibm_backlight_device = backlight_device_register( 6263 memset(&props, 0, sizeof(struct backlight_properties));
6263 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL, 6264 props.max_brightness = (tp_features.bright_16levels) ? 15 : 7;
6264 &ibm_backlight_data); 6265 ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
6266 NULL, NULL,
6267 &ibm_backlight_data,
6268 &props);
6265 if (IS_ERR(ibm_backlight_device)) { 6269 if (IS_ERR(ibm_backlight_device)) {
6266 int rc = PTR_ERR(ibm_backlight_device); 6270 int rc = PTR_ERR(ibm_backlight_device);
6267 ibm_backlight_device = NULL; 6271 ibm_backlight_device = NULL;
@@ -6280,8 +6284,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6280 "or not on your ThinkPad\n", TPACPI_MAIL); 6284 "or not on your ThinkPad\n", TPACPI_MAIL);
6281 } 6285 }
6282 6286
6283 ibm_backlight_device->props.max_brightness =
6284 (tp_features.bright_16levels)? 15 : 7;
6285 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; 6287 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
6286 backlight_update_status(ibm_backlight_device); 6288 backlight_update_status(ibm_backlight_device);
6287 6289
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 789240d1b577..def4841183be 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -924,6 +924,7 @@ static int __init toshiba_acpi_init(void)
924 u32 hci_result; 924 u32 hci_result;
925 bool bt_present; 925 bool bt_present;
926 int ret = 0; 926 int ret = 0;
927 struct backlight_properties props;
927 928
928 if (acpi_disabled) 929 if (acpi_disabled)
929 return -ENODEV; 930 return -ENODEV;
@@ -974,10 +975,12 @@ static int __init toshiba_acpi_init(void)
974 } 975 }
975 } 976 }
976 977
978 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
977 toshiba_backlight_device = backlight_device_register("toshiba", 979 toshiba_backlight_device = backlight_device_register("toshiba",
978 &toshiba_acpi.p_dev->dev, 980 &toshiba_acpi.p_dev->dev,
979 NULL, 981 NULL,
980 &toshiba_backlight_data); 982 &toshiba_backlight_data,
983 &props);
981 if (IS_ERR(toshiba_backlight_device)) { 984 if (IS_ERR(toshiba_backlight_device)) {
982 ret = PTR_ERR(toshiba_backlight_device); 985 ret = PTR_ERR(toshiba_backlight_device);
983 986
@@ -986,7 +989,6 @@ static int __init toshiba_acpi_init(void)
986 toshiba_acpi_exit(); 989 toshiba_acpi_exit();
987 return ret; 990 return ret;
988 } 991 }
989 toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
990 992
991 /* Register rfkill switch for Bluetooth */ 993 /* Register rfkill switch for Bluetooth */
992 if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) { 994 if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b3beab610da4..fc7ae05ce48a 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -704,6 +704,13 @@ int sclp_chp_deconfigure(struct chp_id chpid)
704 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); 704 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
705} 705}
706 706
707int arch_get_memory_phys_device(unsigned long start_pfn)
708{
709 if (!rzm)
710 return 0;
711 return PFN_PHYS(start_pfn) / rzm;
712}
713
707struct chp_info_sccb { 714struct chp_info_sccb {
708 struct sccb_header header; 715 struct sccb_header header;
709 u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; 716 u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9191d1ea6451..75f2336807cb 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,9 +1,15 @@
1menu "SCSI device support" 1menu "SCSI device support"
2 2
3config SCSI_MOD
4 tristate
5 default y if SCSI=n || SCSI=y
6 default m if SCSI=m
7
3config RAID_ATTRS 8config RAID_ATTRS
4 tristate "RAID Transport Class" 9 tristate "RAID Transport Class"
5 default n 10 default n
6 depends on BLOCK 11 depends on BLOCK
12 depends on SCSI_MOD
7 ---help--- 13 ---help---
8 Provides RAID 14 Provides RAID
9 15
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 67098578fba4..cda6642c7368 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -32,18 +32,11 @@ void be_mcc_notify(struct beiscsi_hba *phba)
32unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) 32unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
33{ 33{
34 unsigned int tag = 0; 34 unsigned int tag = 0;
35 unsigned int num = 0;
36 35
37mcc_tag_rdy:
38 if (phba->ctrl.mcc_tag_available) { 36 if (phba->ctrl.mcc_tag_available) {
39 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; 37 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
40 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; 38 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
41 phba->ctrl.mcc_numtag[tag] = 0; 39 phba->ctrl.mcc_numtag[tag] = 0;
42 } else {
43 udelay(100);
44 num++;
45 if (num < mcc_timeout)
46 goto mcc_tag_rdy;
47 } 40 }
48 if (tag) { 41 if (tag) {
49 phba->ctrl.mcc_tag_available--; 42 phba->ctrl.mcc_tag_available--;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 29a3aaf35f9f..c3928cb8b042 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -482,7 +482,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
482 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep); 482 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
483 if (!tag) { 483 if (!tag) {
484 SE_DEBUG(DBG_LVL_1, 484 SE_DEBUG(DBG_LVL_1,
485 "mgmt_invalidate_connection Failed for cid=%d \n", 485 "mgmt_open_connection Failed for cid=%d \n",
486 beiscsi_ep->ep_cid); 486 beiscsi_ep->ep_cid);
487 } else { 487 } else {
488 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 488 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -701,7 +701,7 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
701 if (!tag) { 701 if (!tag) {
702 SE_DEBUG(DBG_LVL_1, 702 SE_DEBUG(DBG_LVL_1,
703 "mgmt_invalidate_connection Failed for cid=%d \n", 703 "mgmt_invalidate_connection Failed for cid=%d \n",
704 beiscsi_ep->ep_cid); 704 beiscsi_ep->ep_cid);
705 } else { 705 } else {
706 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 706 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
707 phba->ctrl.mcc_numtag[tag]); 707 phba->ctrl.mcc_numtag[tag]);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7c22616ab141..fcfb29e02d8a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -58,6 +58,123 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
58 return 0; 58 return 0;
59} 59}
60 60
61static int beiscsi_eh_abort(struct scsi_cmnd *sc)
62{
63 struct iscsi_cls_session *cls_session;
64 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
65 struct beiscsi_io_task *aborted_io_task;
66 struct iscsi_conn *conn;
67 struct beiscsi_conn *beiscsi_conn;
68 struct beiscsi_hba *phba;
69 struct iscsi_session *session;
70 struct invalidate_command_table *inv_tbl;
71 unsigned int cid, tag, num_invalidate;
72
73 cls_session = starget_to_session(scsi_target(sc->device));
74 session = cls_session->dd_data;
75
76 spin_lock_bh(&session->lock);
77 if (!aborted_task || !aborted_task->sc) {
78 /* we raced */
79 spin_unlock_bh(&session->lock);
80 return SUCCESS;
81 }
82
83 aborted_io_task = aborted_task->dd_data;
84 if (!aborted_io_task->scsi_cmnd) {
85 /* raced or invalid command */
86 spin_unlock_bh(&session->lock);
87 return SUCCESS;
88 }
89 spin_unlock_bh(&session->lock);
90 conn = aborted_task->conn;
91 beiscsi_conn = conn->dd_data;
92 phba = beiscsi_conn->phba;
93
94 /* invalidate iocb */
95 cid = beiscsi_conn->beiscsi_conn_cid;
96 inv_tbl = phba->inv_tbl;
97 memset(inv_tbl, 0x0, sizeof(*inv_tbl));
98 inv_tbl->cid = cid;
99 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
100 num_invalidate = 1;
101 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
102 if (!tag) {
103 shost_printk(KERN_WARNING, phba->shost,
104 "mgmt_invalidate_icds could not be"
105 " submitted\n");
106 return FAILED;
107 } else {
108 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
109 phba->ctrl.mcc_numtag[tag]);
110 free_mcc_tag(&phba->ctrl, tag);
111 }
112
113 return iscsi_eh_abort(sc);
114}
115
116static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
117{
118 struct iscsi_task *abrt_task;
119 struct beiscsi_io_task *abrt_io_task;
120 struct iscsi_conn *conn;
121 struct beiscsi_conn *beiscsi_conn;
122 struct beiscsi_hba *phba;
123 struct iscsi_session *session;
124 struct iscsi_cls_session *cls_session;
125 struct invalidate_command_table *inv_tbl;
126 unsigned int cid, tag, i, num_invalidate;
127 int rc = FAILED;
128
129 /* invalidate iocbs */
130 cls_session = starget_to_session(scsi_target(sc->device));
131 session = cls_session->dd_data;
132 spin_lock_bh(&session->lock);
133 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
134 goto unlock;
135
136 conn = session->leadconn;
137 beiscsi_conn = conn->dd_data;
138 phba = beiscsi_conn->phba;
139 cid = beiscsi_conn->beiscsi_conn_cid;
140 inv_tbl = phba->inv_tbl;
141 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
142 num_invalidate = 0;
143 for (i = 0; i < conn->session->cmds_max; i++) {
144 abrt_task = conn->session->cmds[i];
145 abrt_io_task = abrt_task->dd_data;
146 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
147 continue;
148
149 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
150 continue;
151
152 inv_tbl->cid = cid;
153 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
154 num_invalidate++;
155 inv_tbl++;
156 }
157 spin_unlock_bh(&session->lock);
158 inv_tbl = phba->inv_tbl;
159
160 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
161 if (!tag) {
162 shost_printk(KERN_WARNING, phba->shost,
163 "mgmt_invalidate_icds could not be"
164 " submitted\n");
165 return FAILED;
166 } else {
167 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
168 phba->ctrl.mcc_numtag[tag]);
169 free_mcc_tag(&phba->ctrl, tag);
170 }
171
172 return iscsi_eh_device_reset(sc);
173unlock:
174 spin_unlock_bh(&session->lock);
175 return rc;
176}
177
61/*------------------- PCI Driver operations and data ----------------- */ 178/*------------------- PCI Driver operations and data ----------------- */
62static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { 179static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
63 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 180 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -74,12 +191,12 @@ static struct scsi_host_template beiscsi_sht = {
74 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", 191 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
75 .proc_name = DRV_NAME, 192 .proc_name = DRV_NAME,
76 .queuecommand = iscsi_queuecommand, 193 .queuecommand = iscsi_queuecommand,
77 .eh_abort_handler = iscsi_eh_abort,
78 .change_queue_depth = iscsi_change_queue_depth, 194 .change_queue_depth = iscsi_change_queue_depth,
79 .slave_configure = beiscsi_slave_configure, 195 .slave_configure = beiscsi_slave_configure,
80 .target_alloc = iscsi_target_alloc, 196 .target_alloc = iscsi_target_alloc,
81 .eh_device_reset_handler = iscsi_eh_device_reset, 197 .eh_abort_handler = beiscsi_eh_abort,
82 .eh_target_reset_handler = iscsi_eh_target_reset, 198 .eh_device_reset_handler = beiscsi_eh_device_reset,
199 .eh_target_reset_handler = iscsi_eh_session_reset,
83 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 200 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
84 .can_queue = BE2_IO_DEPTH, 201 .can_queue = BE2_IO_DEPTH,
85 .this_id = -1, 202 .this_id = -1,
@@ -242,7 +359,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
242 + BE2_TMFS 359 + BE2_TMFS
243 + BE2_NOPOUT_REQ)); 360 + BE2_NOPOUT_REQ));
244 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 361 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
245 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;; 362 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
246 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;; 363 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
247 phba->params.num_sge_per_io = BE2_SGE; 364 phba->params.num_sge_per_io = BE2_SGE;
248 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 365 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -946,14 +1063,18 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
946 case HWH_TYPE_IO: 1063 case HWH_TYPE_IO:
947 case HWH_TYPE_IO_RD: 1064 case HWH_TYPE_IO_RD:
948 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1065 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
949 ISCSI_OP_NOOP_OUT) { 1066 ISCSI_OP_NOOP_OUT)
950 be_complete_nopin_resp(beiscsi_conn, task, psol); 1067 be_complete_nopin_resp(beiscsi_conn, task, psol);
951 } else 1068 else
952 be_complete_io(beiscsi_conn, task, psol); 1069 be_complete_io(beiscsi_conn, task, psol);
953 break; 1070 break;
954 1071
955 case HWH_TYPE_LOGOUT: 1072 case HWH_TYPE_LOGOUT:
956 be_complete_logout(beiscsi_conn, task, psol); 1073 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1074 be_complete_logout(beiscsi_conn, task, psol);
1075 else
1076 be_complete_tmf(beiscsi_conn, task, psol);
1077
957 break; 1078 break;
958 1079
959 case HWH_TYPE_LOGIN: 1080 case HWH_TYPE_LOGIN:
@@ -962,10 +1083,6 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
962 "- Solicited path \n"); 1083 "- Solicited path \n");
963 break; 1084 break;
964 1085
965 case HWH_TYPE_TMF:
966 be_complete_tmf(beiscsi_conn, task, psol);
967 break;
968
969 case HWH_TYPE_NOP: 1086 case HWH_TYPE_NOP:
970 be_complete_nopin_resp(beiscsi_conn, task, psol); 1087 be_complete_nopin_resp(beiscsi_conn, task, psol);
971 break; 1088 break;
@@ -2052,7 +2169,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2052 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2169 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2053 ((sizeof(struct iscsi_wrb) * 2170 ((sizeof(struct iscsi_wrb) *
2054 phba->params.wrbs_per_cxn)); 2171 phba->params.wrbs_per_cxn));
2055 for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) { 2172 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2056 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2173 pwrb_context = &phwi_ctrlr->wrb_context[index];
2057 if (num_cxn_wrb) { 2174 if (num_cxn_wrb) {
2058 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2175 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
@@ -3073,14 +3190,18 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3073 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3190 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3074 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); 3191 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3075 iowrite32(reg, addr); 3192 iowrite32(reg, addr);
3076 for (i = 0; i <= phba->num_cpus; i++) { 3193 if (!phba->msix_enabled) {
3077 eq = &phwi_context->be_eq[i].q; 3194 eq = &phwi_context->be_eq[0].q;
3078 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); 3195 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3079 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3196 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3197 } else {
3198 for (i = 0; i <= phba->num_cpus; i++) {
3199 eq = &phwi_context->be_eq[i].q;
3200 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3201 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3202 }
3080 } 3203 }
3081 } else 3204 }
3082 shost_printk(KERN_WARNING, phba->shost,
3083 "In hwi_enable_intr, Not Enabled \n");
3084 return true; 3205 return true;
3085} 3206}
3086 3207
@@ -3476,19 +3597,13 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3476 3597
3477static int beiscsi_mtask(struct iscsi_task *task) 3598static int beiscsi_mtask(struct iscsi_task *task)
3478{ 3599{
3479 struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data; 3600 struct beiscsi_io_task *io_task = task->dd_data;
3480 struct iscsi_conn *conn = task->conn; 3601 struct iscsi_conn *conn = task->conn;
3481 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 3602 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3482 struct beiscsi_hba *phba = beiscsi_conn->phba; 3603 struct beiscsi_hba *phba = beiscsi_conn->phba;
3483 struct iscsi_session *session;
3484 struct iscsi_wrb *pwrb = NULL; 3604 struct iscsi_wrb *pwrb = NULL;
3485 struct hwi_controller *phwi_ctrlr;
3486 struct hwi_wrb_context *pwrb_context;
3487 struct wrb_handle *pwrb_handle;
3488 unsigned int doorbell = 0; 3605 unsigned int doorbell = 0;
3489 unsigned int i, cid; 3606 unsigned int cid;
3490 struct iscsi_task *aborted_task;
3491 unsigned int tag;
3492 3607
3493 cid = beiscsi_conn->beiscsi_conn_cid; 3608 cid = beiscsi_conn->beiscsi_conn_cid;
3494 pwrb = io_task->pwrb_handle->pwrb; 3609 pwrb = io_task->pwrb_handle->pwrb;
@@ -3499,6 +3614,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
3499 io_task->pwrb_handle->wrb_index); 3614 io_task->pwrb_handle->wrb_index);
3500 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 3615 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3501 io_task->psgl_handle->sgl_index); 3616 io_task->psgl_handle->sgl_index);
3617
3502 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 3618 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3503 case ISCSI_OP_LOGIN: 3619 case ISCSI_OP_LOGIN:
3504 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 3620 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
@@ -3523,33 +3639,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
3523 hwi_write_buffer(pwrb, task); 3639 hwi_write_buffer(pwrb, task);
3524 break; 3640 break;
3525 case ISCSI_OP_SCSI_TMFUNC: 3641 case ISCSI_OP_SCSI_TMFUNC:
3526 session = conn->session;
3527 i = ((struct iscsi_tm *)task->hdr)->rtt;
3528 phwi_ctrlr = phba->phwi_ctrlr;
3529 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3530 phba->fw_config.iscsi_cid_start];
3531 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3532 >> 16];
3533 aborted_task = pwrb_handle->pio_handle;
3534 if (!aborted_task)
3535 return 0;
3536
3537 aborted_io_task = aborted_task->dd_data;
3538 if (!aborted_io_task->scsi_cmnd)
3539 return 0;
3540
3541 tag = mgmt_invalidate_icds(phba,
3542 aborted_io_task->psgl_handle->sgl_index,
3543 cid);
3544 if (!tag) {
3545 shost_printk(KERN_WARNING, phba->shost,
3546 "mgmt_invalidate_icds could not be"
3547 " submitted\n");
3548 } else {
3549 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3550 phba->ctrl.mcc_numtag[tag]);
3551 free_mcc_tag(&phba->ctrl, tag);
3552 }
3553 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 3642 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3554 INI_TMF_CMD); 3643 INI_TMF_CMD);
3555 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3644 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
@@ -3558,7 +3647,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
3558 case ISCSI_OP_LOGOUT: 3647 case ISCSI_OP_LOGOUT:
3559 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3648 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3560 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 3649 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3561 HWH_TYPE_LOGOUT); 3650 HWH_TYPE_LOGOUT);
3562 hwi_write_buffer(pwrb, task); 3651 hwi_write_buffer(pwrb, task);
3563 break; 3652 break;
3564 3653
@@ -3584,17 +3673,12 @@ static int beiscsi_mtask(struct iscsi_task *task)
3584 3673
3585static int beiscsi_task_xmit(struct iscsi_task *task) 3674static int beiscsi_task_xmit(struct iscsi_task *task)
3586{ 3675{
3587 struct iscsi_conn *conn = task->conn;
3588 struct beiscsi_io_task *io_task = task->dd_data; 3676 struct beiscsi_io_task *io_task = task->dd_data;
3589 struct scsi_cmnd *sc = task->sc; 3677 struct scsi_cmnd *sc = task->sc;
3590 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3591 struct scatterlist *sg; 3678 struct scatterlist *sg;
3592 int num_sg; 3679 int num_sg;
3593 unsigned int writedir = 0, xferlen = 0; 3680 unsigned int writedir = 0, xferlen = 0;
3594 3681
3595 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3596 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3597 task, conn, beiscsi_conn);
3598 if (!sc) 3682 if (!sc)
3599 return beiscsi_mtask(task); 3683 return beiscsi_mtask(task);
3600 3684
@@ -3699,7 +3783,6 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3699 " Failed in beiscsi_hba_alloc \n"); 3783 " Failed in beiscsi_hba_alloc \n");
3700 goto disable_pci; 3784 goto disable_pci;
3701 } 3785 }
3702 SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3703 3786
3704 switch (pcidev->device) { 3787 switch (pcidev->device) {
3705 case BE_DEVICE_ID1: 3788 case BE_DEVICE_ID1:
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index c53a80ab796c..87ec21280a37 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -257,6 +257,11 @@ struct hba_parameters {
257 unsigned int num_sge; 257 unsigned int num_sge;
258}; 258};
259 259
260struct invalidate_command_table {
261 unsigned short icd;
262 unsigned short cid;
263} __packed;
264
260struct beiscsi_hba { 265struct beiscsi_hba {
261 struct hba_parameters params; 266 struct hba_parameters params;
262 struct hwi_controller *phwi_ctrlr; 267 struct hwi_controller *phwi_ctrlr;
@@ -329,6 +334,8 @@ struct beiscsi_hba {
329 struct work_struct work_cqs; /* The work being queued */ 334 struct work_struct work_cqs; /* The work being queued */
330 struct be_ctrl_info ctrl; 335 struct be_ctrl_info ctrl;
331 unsigned int generation; 336 unsigned int generation;
337 struct invalidate_command_table inv_tbl[128];
338
332}; 339};
333 340
334struct beiscsi_session { 341struct beiscsi_session {
@@ -491,8 +498,6 @@ struct hwi_async_entry {
491 struct list_head data_busy_list; 498 struct list_head data_busy_list;
492}; 499};
493 500
494#define BE_MIN_ASYNC_ENTRIES 128
495
496struct hwi_async_pdu_context { 501struct hwi_async_pdu_context {
497 struct { 502 struct {
498 struct be_bus_address pa_base; 503 struct be_bus_address pa_base;
@@ -533,7 +538,7 @@ struct hwi_async_pdu_context {
533 * This is a varying size list! Do not add anything 538 * This is a varying size list! Do not add anything
534 * after this entry!! 539 * after this entry!!
535 */ 540 */
536 struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES]; 541 struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
537}; 542};
538 543
539#define PDUCQE_CODE_MASK 0x0000003F 544#define PDUCQE_CODE_MASK 0x0000003F
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 317bcd042ced..72617b650a7e 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -145,14 +145,15 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
145} 145}
146 146
147unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 147unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
148 unsigned int icd, unsigned int cid) 148 struct invalidate_command_table *inv_tbl,
149 unsigned int num_invalidate, unsigned int cid)
149{ 150{
150 struct be_dma_mem nonemb_cmd; 151 struct be_dma_mem nonemb_cmd;
151 struct be_ctrl_info *ctrl = &phba->ctrl; 152 struct be_ctrl_info *ctrl = &phba->ctrl;
152 struct be_mcc_wrb *wrb; 153 struct be_mcc_wrb *wrb;
153 struct be_sge *sge; 154 struct be_sge *sge;
154 struct invalidate_commands_params_in *req; 155 struct invalidate_commands_params_in *req;
155 unsigned int tag = 0; 156 unsigned int i, tag = 0;
156 157
157 spin_lock(&ctrl->mbox_lock); 158 spin_lock(&ctrl->mbox_lock);
158 tag = alloc_mcc_tag(phba); 159 tag = alloc_mcc_tag(phba);
@@ -183,9 +184,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
183 sizeof(*req)); 184 sizeof(*req));
184 req->ref_handle = 0; 185 req->ref_handle = 0;
185 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; 186 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
186 req->icd_count = 0; 187 for (i = 0; i < num_invalidate; i++) {
187 req->table[req->icd_count].icd = icd; 188 req->table[i].icd = inv_tbl->icd;
188 req->table[req->icd_count].cid = cid; 189 req->table[i].cid = inv_tbl->cid;
190 req->icd_count++;
191 inv_tbl++;
192 }
189 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); 193 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
190 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); 194 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
191 sge->len = cpu_to_le32(nonemb_cmd.size); 195 sge->len = cpu_to_le32(nonemb_cmd.size);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index ecead6a5aa56..3d316b82feb1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -94,7 +94,8 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
94 unsigned short cid, 94 unsigned short cid,
95 unsigned int upload_flag); 95 unsigned int upload_flag);
96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, 96unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
97 unsigned int icd, unsigned int cid); 97 struct invalidate_command_table *inv_tbl,
98 unsigned int num_invalidate, unsigned int cid);
98 99
99struct iscsi_invalidate_connection_params_in { 100struct iscsi_invalidate_connection_params_in {
100 struct be_cmd_req_hdr hdr; 101 struct be_cmd_req_hdr hdr;
@@ -116,11 +117,6 @@ union iscsi_invalidate_connection_params {
116 struct iscsi_invalidate_connection_params_out response; 117 struct iscsi_invalidate_connection_params_out response;
117} __packed; 118} __packed;
118 119
119struct invalidate_command_table {
120 unsigned short icd;
121 unsigned short cid;
122} __packed;
123
124struct invalidate_commands_params_in { 120struct invalidate_commands_params_in {
125 struct be_cmd_req_hdr hdr; 121 struct be_cmd_req_hdr hdr;
126 unsigned int ref_handle; 122 unsigned int ref_handle;
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 1d6009490d1c..17e06cae71b2 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -2,14 +2,14 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
2 2
3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o 3bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
4 4
5bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o 5bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
6bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 6bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o 7bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o 8bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
9bfa-y += bfa_csdebug.o bfa_sm.o plog.o 9bfa-y += bfa_csdebug.o bfa_sm.o plog.o
10 10
11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o 11bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o 12bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o 13bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
14 14
15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna 15ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 44e2d1155c51..0c08e185a766 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -385,6 +385,15 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
385} 385}
386 386
387/** 387/**
388 * Clear the saved firmware trace information of an IOC.
389 */
390void
391bfa_debug_fwsave_clear(struct bfa_s *bfa)
392{
393 bfa_ioc_debug_fwsave_clear(&bfa->ioc);
394}
395
396/**
388 * Fetch firmware trace data. 397 * Fetch firmware trace data.
389 * 398 *
390 * @param[in] bfa BFA instance 399 * @param[in] bfa BFA instance
@@ -399,4 +408,14 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
399{ 408{
400 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); 409 return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
401} 410}
411
412/**
413 * Reset hw semaphore & usage cnt regs and initialize.
414 */
415void
416bfa_chip_reset(struct bfa_s *bfa)
417{
418 bfa_ioc_ownership_reset(&bfa->ioc);
419 bfa_ioc_pll_init(&bfa->ioc);
420}
402#endif 421#endif
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index aef648b55dfc..c589488db0c1 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -23,40 +23,33 @@
23#include <cs/bfa_plog.h> 23#include <cs/bfa_plog.h>
24#include <aen/bfa_aen_port.h> 24#include <aen/bfa_aen_port.h>
25 25
26BFA_TRC_FILE(HAL, PPORT); 26BFA_TRC_FILE(HAL, FCPORT);
27BFA_MODULE(pport); 27BFA_MODULE(fcport);
28
29#define bfa_pport_callback(__pport, __event) do { \
30 if ((__pport)->bfa->fcs) { \
31 (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
32 } else { \
33 (__pport)->hcb_event = (__event); \
34 bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \
35 __bfa_cb_port_event, (__pport)); \
36 } \
37} while (0)
38 28
39/* 29/*
40 * The port is considered disabled if corresponding physical port or IOC are 30 * The port is considered disabled if corresponding physical port or IOC are
41 * disabled explicitly 31 * disabled explicitly
42 */ 32 */
43#define BFA_PORT_IS_DISABLED(bfa) \ 33#define BFA_PORT_IS_DISABLED(bfa) \
44 ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \ 34 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
45 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) 35 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
46 36
47/* 37/*
48 * forward declarations 38 * forward declarations
49 */ 39 */
50static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port); 40static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
51static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port); 41static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
52static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport); 42static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
53static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport); 43static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
54static void bfa_pport_set_wwns(struct bfa_pport_s *port); 44static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
55static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete); 45static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
56static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); 46static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
57static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); 47 enum bfa_pport_linkstate event);
58static void bfa_port_stats_timeout(void *cbarg); 48static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
59static void bfa_port_stats_clr_timeout(void *cbarg); 49 enum bfa_pport_linkstate event);
50static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
51static void bfa_fcport_stats_get_timeout(void *cbarg);
52static void bfa_fcport_stats_clr_timeout(void *cbarg);
60 53
61/** 54/**
62 * bfa_pport_private 55 * bfa_pport_private
@@ -65,111 +58,114 @@ static void bfa_port_stats_clr_timeout(void *cbarg);
65/** 58/**
66 * BFA port state machine events 59 * BFA port state machine events
67 */ 60 */
68enum bfa_pport_sm_event { 61enum bfa_fcport_sm_event {
69 BFA_PPORT_SM_START = 1, /* start port state machine */ 62 BFA_FCPORT_SM_START = 1, /* start port state machine */
70 BFA_PPORT_SM_STOP = 2, /* stop port state machine */ 63 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
71 BFA_PPORT_SM_ENABLE = 3, /* enable port */ 64 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
72 BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */ 65 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
73 BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ 66 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
74 BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */ 67 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
75 BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkup down */ 68 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
76 BFA_PPORT_SM_QRESUME = 8, /* CQ space available */ 69 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
77 BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */ 70 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
78}; 71};
79 72
80static void bfa_pport_sm_uninit(struct bfa_pport_s *pport, 73/**
81 enum bfa_pport_sm_event event); 74 * BFA port link notification state machine events
82static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 75 */
83 enum bfa_pport_sm_event event); 76
84static void bfa_pport_sm_enabling(struct bfa_pport_s *pport, 77enum bfa_fcport_ln_sm_event {
85 enum bfa_pport_sm_event event); 78 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
86static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport, 79 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
87 enum bfa_pport_sm_event event); 80 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
88static void bfa_pport_sm_linkup(struct bfa_pport_s *pport, 81};
89 enum bfa_pport_sm_event event); 82
90static void bfa_pport_sm_disabling(struct bfa_pport_s *pport, 83static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
91 enum bfa_pport_sm_event event); 84 enum bfa_fcport_sm_event event);
92static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 85static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
93 enum bfa_pport_sm_event event); 86 enum bfa_fcport_sm_event event);
94static void bfa_pport_sm_disabled(struct bfa_pport_s *pport, 87static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
95 enum bfa_pport_sm_event event); 88 enum bfa_fcport_sm_event event);
96static void bfa_pport_sm_stopped(struct bfa_pport_s *pport, 89static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
97 enum bfa_pport_sm_event event); 90 enum bfa_fcport_sm_event event);
98static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport, 91static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
99 enum bfa_pport_sm_event event); 92 enum bfa_fcport_sm_event event);
100static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport, 93static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
101 enum bfa_pport_sm_event event); 94 enum bfa_fcport_sm_event event);
95static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
96 enum bfa_fcport_sm_event event);
97static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
98 enum bfa_fcport_sm_event event);
99static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
100 enum bfa_fcport_sm_event event);
101static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
102 enum bfa_fcport_sm_event event);
103static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
104 enum bfa_fcport_sm_event event);
105
106static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
107 enum bfa_fcport_ln_sm_event event);
108static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
109 enum bfa_fcport_ln_sm_event event);
110static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
111 enum bfa_fcport_ln_sm_event event);
112static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
113 enum bfa_fcport_ln_sm_event event);
114static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
115 enum bfa_fcport_ln_sm_event event);
116static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
117 enum bfa_fcport_ln_sm_event event);
118static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
119 enum bfa_fcport_ln_sm_event event);
102 120
103static struct bfa_sm_table_s hal_pport_sm_table[] = { 121static struct bfa_sm_table_s hal_pport_sm_table[] = {
104 {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT}, 122 {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
105 {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, 123 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
106 {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING}, 124 {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
107 {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, 125 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
108 {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP}, 126 {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
109 {BFA_SM(bfa_pport_sm_disabling_qwait), 127 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
110 BFA_PPORT_ST_DISABLING_QWAIT}, 128 {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
111 {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING}, 129 {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
112 {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED}, 130 {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
113 {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED}, 131 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
114 {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, 132 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
115 {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
116}; 133};
117 134
118static void 135static void
119bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) 136bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
120{ 137{
121 union bfa_aen_data_u aen_data; 138 union bfa_aen_data_u aen_data;
122 struct bfa_log_mod_s *logmod = pport->bfa->logm; 139 struct bfa_log_mod_s *logmod = fcport->bfa->logm;
123 wwn_t pwwn = pport->pwwn; 140 wwn_t pwwn = fcport->pwwn;
124 char pwwn_ptr[BFA_STRING_32]; 141 char pwwn_ptr[BFA_STRING_32];
125 struct bfa_ioc_attr_s ioc_attr;
126 142
143 memset(&aen_data, 0, sizeof(aen_data));
127 wwn2str(pwwn_ptr, pwwn); 144 wwn2str(pwwn_ptr, pwwn);
128 switch (event) { 145 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
129 case BFA_PORT_AEN_ONLINE:
130 bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
131 break;
132 case BFA_PORT_AEN_OFFLINE:
133 bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
134 break;
135 case BFA_PORT_AEN_ENABLE:
136 bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
137 break;
138 case BFA_PORT_AEN_DISABLE:
139 bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
140 break;
141 case BFA_PORT_AEN_DISCONNECT:
142 bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
143 break;
144 case BFA_PORT_AEN_QOS_NEG:
145 bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
146 break;
147 default:
148 break;
149 }
150 146
151 bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr); 147 aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
152 aen_data.port.ioc_type = ioc_attr.ioc_type;
153 aen_data.port.pwwn = pwwn; 148 aen_data.port.pwwn = pwwn;
154} 149}
155 150
156static void 151static void
157bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 152bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
153 enum bfa_fcport_sm_event event)
158{ 154{
159 bfa_trc(pport->bfa, event); 155 bfa_trc(fcport->bfa, event);
160 156
161 switch (event) { 157 switch (event) {
162 case BFA_PPORT_SM_START: 158 case BFA_FCPORT_SM_START:
163 /** 159 /**
164 * Start event after IOC is configured and BFA is started. 160 * Start event after IOC is configured and BFA is started.
165 */ 161 */
166 if (bfa_pport_send_enable(pport)) 162 if (bfa_fcport_send_enable(fcport))
167 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 163 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
168 else 164 else
169 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 165 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
170 break; 166 break;
171 167
172 case BFA_PPORT_SM_ENABLE: 168 case BFA_FCPORT_SM_ENABLE:
173 /** 169 /**
174 * Port is persistently configured to be in enabled state. Do 170 * Port is persistently configured to be in enabled state. Do
175 * not change state. Port enabling is done when START event is 171 * not change state. Port enabling is done when START event is
@@ -177,389 +173,412 @@ bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
177 */ 173 */
178 break; 174 break;
179 175
180 case BFA_PPORT_SM_DISABLE: 176 case BFA_FCPORT_SM_DISABLE:
181 /** 177 /**
182 * If a port is persistently configured to be disabled, the 178 * If a port is persistently configured to be disabled, the
183 * first event will a port disable request. 179 * first event will a port disable request.
184 */ 180 */
185 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 181 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
186 break; 182 break;
187 183
188 case BFA_PPORT_SM_HWFAIL: 184 case BFA_FCPORT_SM_HWFAIL:
189 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 185 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
190 break; 186 break;
191 187
192 default: 188 default:
193 bfa_sm_fault(pport->bfa, event); 189 bfa_sm_fault(fcport->bfa, event);
194 } 190 }
195} 191}
196 192
197static void 193static void
198bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, 194bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
199 enum bfa_pport_sm_event event) 195 enum bfa_fcport_sm_event event)
200{ 196{
201 bfa_trc(pport->bfa, event); 197 bfa_trc(fcport->bfa, event);
202 198
203 switch (event) { 199 switch (event) {
204 case BFA_PPORT_SM_QRESUME: 200 case BFA_FCPORT_SM_QRESUME:
205 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 201 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
206 bfa_pport_send_enable(pport); 202 bfa_fcport_send_enable(fcport);
207 break; 203 break;
208 204
209 case BFA_PPORT_SM_STOP: 205 case BFA_FCPORT_SM_STOP:
210 bfa_reqq_wcancel(&pport->reqq_wait); 206 bfa_reqq_wcancel(&fcport->reqq_wait);
211 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 207 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
212 break; 208 break;
213 209
214 case BFA_PPORT_SM_ENABLE: 210 case BFA_FCPORT_SM_ENABLE:
215 /** 211 /**
216 * Already enable is in progress. 212 * Already enable is in progress.
217 */ 213 */
218 break; 214 break;
219 215
220 case BFA_PPORT_SM_DISABLE: 216 case BFA_FCPORT_SM_DISABLE:
221 /** 217 /**
222 * Just send disable request to firmware when room becomes 218 * Just send disable request to firmware when room becomes
223 * available in request queue. 219 * available in request queue.
224 */ 220 */
225 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 221 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
226 bfa_reqq_wcancel(&pport->reqq_wait); 222 bfa_reqq_wcancel(&fcport->reqq_wait);
227 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 223 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
228 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 224 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
229 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 225 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
230 break; 226 break;
231 227
232 case BFA_PPORT_SM_LINKUP: 228 case BFA_FCPORT_SM_LINKUP:
233 case BFA_PPORT_SM_LINKDOWN: 229 case BFA_FCPORT_SM_LINKDOWN:
234 /** 230 /**
235 * Possible to get link events when doing back-to-back 231 * Possible to get link events when doing back-to-back
236 * enable/disables. 232 * enable/disables.
237 */ 233 */
238 break; 234 break;
239 235
240 case BFA_PPORT_SM_HWFAIL: 236 case BFA_FCPORT_SM_HWFAIL:
241 bfa_reqq_wcancel(&pport->reqq_wait); 237 bfa_reqq_wcancel(&fcport->reqq_wait);
242 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 238 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
243 break; 239 break;
244 240
245 default: 241 default:
246 bfa_sm_fault(pport->bfa, event); 242 bfa_sm_fault(fcport->bfa, event);
247 } 243 }
248} 244}
249 245
250static void 246static void
251bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 247bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
248 enum bfa_fcport_sm_event event)
252{ 249{
253 bfa_trc(pport->bfa, event); 250 bfa_trc(fcport->bfa, event);
254 251
255 switch (event) { 252 switch (event) {
256 case BFA_PPORT_SM_FWRSP: 253 case BFA_FCPORT_SM_FWRSP:
257 case BFA_PPORT_SM_LINKDOWN: 254 case BFA_FCPORT_SM_LINKDOWN:
258 bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 255 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
259 break; 256 break;
260 257
261 case BFA_PPORT_SM_LINKUP: 258 case BFA_FCPORT_SM_LINKUP:
262 bfa_pport_update_linkinfo(pport); 259 bfa_fcport_update_linkinfo(fcport);
263 bfa_sm_set_state(pport, bfa_pport_sm_linkup); 260 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
264 261
265 bfa_assert(pport->event_cbfn); 262 bfa_assert(fcport->event_cbfn);
266 bfa_pport_callback(pport, BFA_PPORT_LINKUP); 263 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
267 break; 264 break;
268 265
269 case BFA_PPORT_SM_ENABLE: 266 case BFA_FCPORT_SM_ENABLE:
270 /** 267 /**
271 * Already being enabled. 268 * Already being enabled.
272 */ 269 */
273 break; 270 break;
274 271
275 case BFA_PPORT_SM_DISABLE: 272 case BFA_FCPORT_SM_DISABLE:
276 if (bfa_pport_send_disable(pport)) 273 if (bfa_fcport_send_disable(fcport))
277 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 274 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
278 else 275 else
279 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 276 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
280 277
281 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 278 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
282 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 279 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
283 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 280 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
284 break; 281 break;
285 282
286 case BFA_PPORT_SM_STOP: 283 case BFA_FCPORT_SM_STOP:
287 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 284 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
288 break; 285 break;
289 286
290 case BFA_PPORT_SM_HWFAIL: 287 case BFA_FCPORT_SM_HWFAIL:
291 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 288 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
292 break; 289 break;
293 290
294 default: 291 default:
295 bfa_sm_fault(pport->bfa, event); 292 bfa_sm_fault(fcport->bfa, event);
296 } 293 }
297} 294}
298 295
299static void 296static void
300bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 297bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
298 enum bfa_fcport_sm_event event)
301{ 299{
302 bfa_trc(pport->bfa, event); 300 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
301 bfa_trc(fcport->bfa, event);
303 302
304 switch (event) { 303 switch (event) {
305 case BFA_PPORT_SM_LINKUP: 304 case BFA_FCPORT_SM_LINKUP:
306 bfa_pport_update_linkinfo(pport); 305 bfa_fcport_update_linkinfo(fcport);
307 bfa_sm_set_state(pport, bfa_pport_sm_linkup); 306 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
308 bfa_assert(pport->event_cbfn); 307 bfa_assert(fcport->event_cbfn);
309 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 308 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
310 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); 309 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
311 bfa_pport_callback(pport, BFA_PPORT_LINKUP); 310
312 bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE); 311 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
312
313 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
314 bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
315
316 if (pevent->link_state.fcf.fipfailed)
317 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
318 BFA_PL_EID_FIP_FCF_DISC, 0,
319 "FIP FCF Discovery Failed");
320 else
321 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
322 BFA_PL_EID_FIP_FCF_DISC, 0,
323 "FIP FCF Discovered");
324 }
325
326 bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
327 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
313 /** 328 /**
314 * If QoS is enabled and it is not online, 329 * If QoS is enabled and it is not online,
315 * Send a separate event. 330 * Send a separate event.
316 */ 331 */
317 if ((pport->cfg.qos_enabled) 332 if ((fcport->cfg.qos_enabled)
318 && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE)) 333 && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
319 bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG); 334 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
320 335
321 break; 336 break;
322 337
323 case BFA_PPORT_SM_LINKDOWN: 338 case BFA_FCPORT_SM_LINKDOWN:
324 /** 339 /**
325 * Possible to get link down event. 340 * Possible to get link down event.
326 */ 341 */
327 break; 342 break;
328 343
329 case BFA_PPORT_SM_ENABLE: 344 case BFA_FCPORT_SM_ENABLE:
330 /** 345 /**
331 * Already enabled. 346 * Already enabled.
332 */ 347 */
333 break; 348 break;
334 349
335 case BFA_PPORT_SM_DISABLE: 350 case BFA_FCPORT_SM_DISABLE:
336 if (bfa_pport_send_disable(pport)) 351 if (bfa_fcport_send_disable(fcport))
337 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 352 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
338 else 353 else
339 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 354 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
340 355
341 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 356 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
342 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 357 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
343 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 358 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
344 break; 359 break;
345 360
346 case BFA_PPORT_SM_STOP: 361 case BFA_FCPORT_SM_STOP:
347 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 362 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
348 break; 363 break;
349 364
350 case BFA_PPORT_SM_HWFAIL: 365 case BFA_FCPORT_SM_HWFAIL:
351 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 366 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
352 break; 367 break;
353 368
354 default: 369 default:
355 bfa_sm_fault(pport->bfa, event); 370 bfa_sm_fault(fcport->bfa, event);
356 } 371 }
357} 372}
358 373
359static void 374static void
360bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 375bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
376 enum bfa_fcport_sm_event event)
361{ 377{
362 bfa_trc(pport->bfa, event); 378 bfa_trc(fcport->bfa, event);
363 379
364 switch (event) { 380 switch (event) {
365 case BFA_PPORT_SM_ENABLE: 381 case BFA_FCPORT_SM_ENABLE:
366 /** 382 /**
367 * Already enabled. 383 * Already enabled.
368 */ 384 */
369 break; 385 break;
370 386
371 case BFA_PPORT_SM_DISABLE: 387 case BFA_FCPORT_SM_DISABLE:
372 if (bfa_pport_send_disable(pport)) 388 if (bfa_fcport_send_disable(fcport))
373 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 389 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
374 else 390 else
375 bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); 391 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
376 392
377 bfa_pport_reset_linkinfo(pport); 393 bfa_fcport_reset_linkinfo(fcport);
378 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 394 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
379 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 395 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
380 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); 396 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
381 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 397 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
382 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); 398 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
383 break; 399 break;
384 400
385 case BFA_PPORT_SM_LINKDOWN: 401 case BFA_FCPORT_SM_LINKDOWN:
386 bfa_sm_set_state(pport, bfa_pport_sm_linkdown); 402 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
387 bfa_pport_reset_linkinfo(pport); 403 bfa_fcport_reset_linkinfo(fcport);
388 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 404 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
389 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 405 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
390 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); 406 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
391 if (BFA_PORT_IS_DISABLED(pport->bfa)) 407 if (BFA_PORT_IS_DISABLED(fcport->bfa))
392 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 408 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
393 else 409 else
394 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 410 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
395 break; 411 break;
396 412
397 case BFA_PPORT_SM_STOP: 413 case BFA_FCPORT_SM_STOP:
398 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 414 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
399 bfa_pport_reset_linkinfo(pport); 415 bfa_fcport_reset_linkinfo(fcport);
400 if (BFA_PORT_IS_DISABLED(pport->bfa)) 416 if (BFA_PORT_IS_DISABLED(fcport->bfa))
401 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 417 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
402 else 418 else
403 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 419 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
404 break; 420 break;
405 421
406 case BFA_PPORT_SM_HWFAIL: 422 case BFA_FCPORT_SM_HWFAIL:
407 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 423 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
408 bfa_pport_reset_linkinfo(pport); 424 bfa_fcport_reset_linkinfo(fcport);
409 bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); 425 bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
410 if (BFA_PORT_IS_DISABLED(pport->bfa)) 426 if (BFA_PORT_IS_DISABLED(fcport->bfa))
411 bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); 427 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
412 else 428 else
413 bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); 429 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
414 break; 430 break;
415 431
416 default: 432 default:
417 bfa_sm_fault(pport->bfa, event); 433 bfa_sm_fault(fcport->bfa, event);
418 } 434 }
419} 435}
420 436
421static void 437static void
422bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, 438bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
423 enum bfa_pport_sm_event event) 439 enum bfa_fcport_sm_event event)
424{ 440{
425 bfa_trc(pport->bfa, event); 441 bfa_trc(fcport->bfa, event);
426 442
427 switch (event) { 443 switch (event) {
428 case BFA_PPORT_SM_QRESUME: 444 case BFA_FCPORT_SM_QRESUME:
429 bfa_sm_set_state(pport, bfa_pport_sm_disabling); 445 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
430 bfa_pport_send_disable(pport); 446 bfa_fcport_send_disable(fcport);
431 break; 447 break;
432 448
433 case BFA_PPORT_SM_STOP: 449 case BFA_FCPORT_SM_STOP:
434 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 450 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
435 bfa_reqq_wcancel(&pport->reqq_wait); 451 bfa_reqq_wcancel(&fcport->reqq_wait);
436 break; 452 break;
437 453
438 case BFA_PPORT_SM_DISABLE: 454 case BFA_FCPORT_SM_DISABLE:
439 /** 455 /**
440 * Already being disabled. 456 * Already being disabled.
441 */ 457 */
442 break; 458 break;
443 459
444 case BFA_PPORT_SM_LINKUP: 460 case BFA_FCPORT_SM_LINKUP:
445 case BFA_PPORT_SM_LINKDOWN: 461 case BFA_FCPORT_SM_LINKDOWN:
446 /** 462 /**
447 * Possible to get link events when doing back-to-back 463 * Possible to get link events when doing back-to-back
448 * enable/disables. 464 * enable/disables.
449 */ 465 */
450 break; 466 break;
451 467
452 case BFA_PPORT_SM_HWFAIL: 468 case BFA_FCPORT_SM_HWFAIL:
453 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 469 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
454 bfa_reqq_wcancel(&pport->reqq_wait); 470 bfa_reqq_wcancel(&fcport->reqq_wait);
455 break; 471 break;
456 472
457 default: 473 default:
458 bfa_sm_fault(pport->bfa, event); 474 bfa_sm_fault(fcport->bfa, event);
459 } 475 }
460} 476}
461 477
462static void 478static void
463bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 479bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
480 enum bfa_fcport_sm_event event)
464{ 481{
465 bfa_trc(pport->bfa, event); 482 bfa_trc(fcport->bfa, event);
466 483
467 switch (event) { 484 switch (event) {
468 case BFA_PPORT_SM_FWRSP: 485 case BFA_FCPORT_SM_FWRSP:
469 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 486 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
470 break; 487 break;
471 488
472 case BFA_PPORT_SM_DISABLE: 489 case BFA_FCPORT_SM_DISABLE:
473 /** 490 /**
474 * Already being disabled. 491 * Already being disabled.
475 */ 492 */
476 break; 493 break;
477 494
478 case BFA_PPORT_SM_ENABLE: 495 case BFA_FCPORT_SM_ENABLE:
479 if (bfa_pport_send_enable(pport)) 496 if (bfa_fcport_send_enable(fcport))
480 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 497 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
481 else 498 else
482 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 499 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
483 500
484 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 501 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
485 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 502 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
486 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 503 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
487 break; 504 break;
488 505
489 case BFA_PPORT_SM_STOP: 506 case BFA_FCPORT_SM_STOP:
490 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 507 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
491 break; 508 break;
492 509
493 case BFA_PPORT_SM_LINKUP: 510 case BFA_FCPORT_SM_LINKUP:
494 case BFA_PPORT_SM_LINKDOWN: 511 case BFA_FCPORT_SM_LINKDOWN:
495 /** 512 /**
496 * Possible to get link events when doing back-to-back 513 * Possible to get link events when doing back-to-back
497 * enable/disables. 514 * enable/disables.
498 */ 515 */
499 break; 516 break;
500 517
501 case BFA_PPORT_SM_HWFAIL: 518 case BFA_FCPORT_SM_HWFAIL:
502 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 519 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
503 break; 520 break;
504 521
505 default: 522 default:
506 bfa_sm_fault(pport->bfa, event); 523 bfa_sm_fault(fcport->bfa, event);
507 } 524 }
508} 525}
509 526
510static void 527static void
511bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 528bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
529 enum bfa_fcport_sm_event event)
512{ 530{
513 bfa_trc(pport->bfa, event); 531 bfa_trc(fcport->bfa, event);
514 532
515 switch (event) { 533 switch (event) {
516 case BFA_PPORT_SM_START: 534 case BFA_FCPORT_SM_START:
517 /** 535 /**
518 * Ignore start event for a port that is disabled. 536 * Ignore start event for a port that is disabled.
519 */ 537 */
520 break; 538 break;
521 539
522 case BFA_PPORT_SM_STOP: 540 case BFA_FCPORT_SM_STOP:
523 bfa_sm_set_state(pport, bfa_pport_sm_stopped); 541 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
524 break; 542 break;
525 543
526 case BFA_PPORT_SM_ENABLE: 544 case BFA_FCPORT_SM_ENABLE:
527 if (bfa_pport_send_enable(pport)) 545 if (bfa_fcport_send_enable(fcport))
528 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 546 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
529 else 547 else
530 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 548 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
531 549
532 bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, 550 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
533 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); 551 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
534 bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); 552 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
535 break; 553 break;
536 554
537 case BFA_PPORT_SM_DISABLE: 555 case BFA_FCPORT_SM_DISABLE:
538 /** 556 /**
539 * Already disabled. 557 * Already disabled.
540 */ 558 */
541 break; 559 break;
542 560
543 case BFA_PPORT_SM_HWFAIL: 561 case BFA_FCPORT_SM_HWFAIL:
544 bfa_sm_set_state(pport, bfa_pport_sm_iocfail); 562 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
545 break; 563 break;
546 564
547 default: 565 default:
548 bfa_sm_fault(pport->bfa, event); 566 bfa_sm_fault(fcport->bfa, event);
549 } 567 }
550} 568}
551 569
552static void 570static void
553bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 571bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
572 enum bfa_fcport_sm_event event)
554{ 573{
555 bfa_trc(pport->bfa, event); 574 bfa_trc(fcport->bfa, event);
556 575
557 switch (event) { 576 switch (event) {
558 case BFA_PPORT_SM_START: 577 case BFA_FCPORT_SM_START:
559 if (bfa_pport_send_enable(pport)) 578 if (bfa_fcport_send_enable(fcport))
560 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 579 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
561 else 580 else
562 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 581 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
563 break; 582 break;
564 583
565 default: 584 default:
@@ -574,16 +593,17 @@ bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
574 * Port is enabled. IOC is down/failed. 593 * Port is enabled. IOC is down/failed.
575 */ 594 */
576static void 595static void
577bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 596bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
597 enum bfa_fcport_sm_event event)
578{ 598{
579 bfa_trc(pport->bfa, event); 599 bfa_trc(fcport->bfa, event);
580 600
581 switch (event) { 601 switch (event) {
582 case BFA_PPORT_SM_START: 602 case BFA_FCPORT_SM_START:
583 if (bfa_pport_send_enable(pport)) 603 if (bfa_fcport_send_enable(fcport))
584 bfa_sm_set_state(pport, bfa_pport_sm_enabling); 604 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
585 else 605 else
586 bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); 606 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
587 break; 607 break;
588 608
589 default: 609 default:
@@ -598,17 +618,18 @@ bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
598 * Port is disabled. IOC is down/failed. 618 * Port is disabled. IOC is down/failed.
599 */ 619 */
600static void 620static void
601bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) 621bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
622 enum bfa_fcport_sm_event event)
602{ 623{
603 bfa_trc(pport->bfa, event); 624 bfa_trc(fcport->bfa, event);
604 625
605 switch (event) { 626 switch (event) {
606 case BFA_PPORT_SM_START: 627 case BFA_FCPORT_SM_START:
607 bfa_sm_set_state(pport, bfa_pport_sm_disabled); 628 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
608 break; 629 break;
609 630
610 case BFA_PPORT_SM_ENABLE: 631 case BFA_FCPORT_SM_ENABLE:
611 bfa_sm_set_state(pport, bfa_pport_sm_iocdown); 632 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
612 break; 633 break;
613 634
614 default: 635 default:
@@ -619,41 +640,226 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
619 } 640 }
620} 641}
621 642
643/**
644 * Link state is down
645 */
646static void
647bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
648 enum bfa_fcport_ln_sm_event event)
649{
650 bfa_trc(ln->fcport->bfa, event);
651
652 switch (event) {
653 case BFA_FCPORT_LN_SM_LINKUP:
654 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
655 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
656 break;
657
658 default:
659 bfa_sm_fault(ln->fcport->bfa, event);
660 }
661}
662
663/**
664 * Link state is waiting for down notification
665 */
666static void
667bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
668 enum bfa_fcport_ln_sm_event event)
669{
670 bfa_trc(ln->fcport->bfa, event);
671
672 switch (event) {
673 case BFA_FCPORT_LN_SM_LINKUP:
674 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
675 break;
676
677 case BFA_FCPORT_LN_SM_NOTIFICATION:
678 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
679 break;
680
681 default:
682 bfa_sm_fault(ln->fcport->bfa, event);
683 }
684}
685
686/**
687 * Link state is waiting for down notification and there is a pending up
688 */
689static void
690bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
691 enum bfa_fcport_ln_sm_event event)
692{
693 bfa_trc(ln->fcport->bfa, event);
694
695 switch (event) {
696 case BFA_FCPORT_LN_SM_LINKDOWN:
697 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
698 break;
699
700 case BFA_FCPORT_LN_SM_NOTIFICATION:
701 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
702 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
703 break;
704
705 default:
706 bfa_sm_fault(ln->fcport->bfa, event);
707 }
708}
709
710/**
711 * Link state is up
712 */
713static void
714bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
715 enum bfa_fcport_ln_sm_event event)
716{
717 bfa_trc(ln->fcport->bfa, event);
718
719 switch (event) {
720 case BFA_FCPORT_LN_SM_LINKDOWN:
721 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
722 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
723 break;
622 724
725 default:
726 bfa_sm_fault(ln->fcport->bfa, event);
727 }
728}
729
730/**
731 * Link state is waiting for up notification
732 */
733static void
734bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
735 enum bfa_fcport_ln_sm_event event)
736{
737 bfa_trc(ln->fcport->bfa, event);
738
739 switch (event) {
740 case BFA_FCPORT_LN_SM_LINKDOWN:
741 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
742 break;
743
744 case BFA_FCPORT_LN_SM_NOTIFICATION:
745 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
746 break;
747
748 default:
749 bfa_sm_fault(ln->fcport->bfa, event);
750 }
751}
752
753/**
754 * Link state is waiting for up notification and there is a pending down
755 */
756static void
757bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
758 enum bfa_fcport_ln_sm_event event)
759{
760 bfa_trc(ln->fcport->bfa, event);
761
762 switch (event) {
763 case BFA_FCPORT_LN_SM_LINKUP:
764 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
765 break;
766
767 case BFA_FCPORT_LN_SM_NOTIFICATION:
768 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
769 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
770 break;
771
772 default:
773 bfa_sm_fault(ln->fcport->bfa, event);
774 }
775}
776
777/**
778 * Link state is waiting for up notification and there are pending down and up
779 */
780static void
781bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
782 enum bfa_fcport_ln_sm_event event)
783{
784 bfa_trc(ln->fcport->bfa, event);
785
786 switch (event) {
787 case BFA_FCPORT_LN_SM_LINKDOWN:
788 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
789 break;
790
791 case BFA_FCPORT_LN_SM_NOTIFICATION:
792 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
793 bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
794 break;
795
796 default:
797 bfa_sm_fault(ln->fcport->bfa, event);
798 }
799}
623 800
624/** 801/**
625 * bfa_pport_private 802 * bfa_pport_private
626 */ 803 */
627 804
628static void 805static void
629__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete) 806__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
630{ 807{
631 struct bfa_pport_s *pport = cbarg; 808 struct bfa_fcport_ln_s *ln = cbarg;
632 809
633 if (complete) 810 if (complete)
634 pport->event_cbfn(pport->event_cbarg, pport->hcb_event); 811 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
812 else
813 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
635} 814}
636 815
637#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \ 816static void
817bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
818{
819 if (fcport->bfa->fcs) {
820 fcport->event_cbfn(fcport->event_cbarg, event);
821 return;
822 }
823
824 switch (event) {
825 case BFA_PPORT_LINKUP:
826 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
827 break;
828 case BFA_PPORT_LINKDOWN:
829 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
830 break;
831 default:
832 bfa_assert(0);
833 }
834}
835
836static void
837bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
838{
839 ln->ln_event = event;
840 bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
841}
842
843#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
638 BFA_CACHELINE_SZ)) 844 BFA_CACHELINE_SZ))
639 845
640static void 846static void
641bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, 847bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
642 u32 *dm_len) 848 u32 *dm_len)
643{ 849{
644 *dm_len += PPORT_STATS_DMA_SZ; 850 *dm_len += FCPORT_STATS_DMA_SZ;
645} 851}
646 852
647static void 853static void
648bfa_pport_qresume(void *cbarg) 854bfa_fcport_qresume(void *cbarg)
649{ 855{
650 struct bfa_pport_s *port = cbarg; 856 struct bfa_fcport_s *fcport = cbarg;
651 857
652 bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME); 858 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
653} 859}
654 860
655static void 861static void
656bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) 862bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
657{ 863{
658 u8 *dm_kva; 864 u8 *dm_kva;
659 u64 dm_pa; 865 u64 dm_pa;
@@ -661,12 +867,12 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
661 dm_kva = bfa_meminfo_dma_virt(meminfo); 867 dm_kva = bfa_meminfo_dma_virt(meminfo);
662 dm_pa = bfa_meminfo_dma_phys(meminfo); 868 dm_pa = bfa_meminfo_dma_phys(meminfo);
663 869
664 pport->stats_kva = dm_kva; 870 fcport->stats_kva = dm_kva;
665 pport->stats_pa = dm_pa; 871 fcport->stats_pa = dm_pa;
666 pport->stats = (union bfa_pport_stats_u *)dm_kva; 872 fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
667 873
668 dm_kva += PPORT_STATS_DMA_SZ; 874 dm_kva += FCPORT_STATS_DMA_SZ;
669 dm_pa += PPORT_STATS_DMA_SZ; 875 dm_pa += FCPORT_STATS_DMA_SZ;
670 876
671 bfa_meminfo_dma_virt(meminfo) = dm_kva; 877 bfa_meminfo_dma_virt(meminfo) = dm_kva;
672 bfa_meminfo_dma_phys(meminfo) = dm_pa; 878 bfa_meminfo_dma_phys(meminfo) = dm_pa;
@@ -676,18 +882,21 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
676 * Memory initialization. 882 * Memory initialization.
677 */ 883 */
678static void 884static void
679bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, 885bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
680 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) 886 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
681{ 887{
682 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 888 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
683 struct bfa_pport_cfg_s *port_cfg = &pport->cfg; 889 struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
890 struct bfa_fcport_ln_s *ln = &fcport->ln;
684 891
685 bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s)); 892 bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
686 pport->bfa = bfa; 893 fcport->bfa = bfa;
894 ln->fcport = fcport;
687 895
688 bfa_pport_mem_claim(pport, meminfo); 896 bfa_fcport_mem_claim(fcport, meminfo);
689 897
690 bfa_sm_set_state(pport, bfa_pport_sm_uninit); 898 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
899 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
691 900
692 /** 901 /**
693 * initialize and set default configuration 902 * initialize and set default configuration
@@ -699,30 +908,30 @@ bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
699 908
700 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS; 909 port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
701 910
702 bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport); 911 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
703} 912}
704 913
705static void 914static void
706bfa_pport_initdone(struct bfa_s *bfa) 915bfa_fcport_initdone(struct bfa_s *bfa)
707{ 916{
708 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 917 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
709 918
710 /** 919 /**
711 * Initialize port attributes from IOC hardware data. 920 * Initialize port attributes from IOC hardware data.
712 */ 921 */
713 bfa_pport_set_wwns(pport); 922 bfa_fcport_set_wwns(fcport);
714 if (pport->cfg.maxfrsize == 0) 923 if (fcport->cfg.maxfrsize == 0)
715 pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); 924 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
716 pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); 925 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
717 pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); 926 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
718 927
719 bfa_assert(pport->cfg.maxfrsize); 928 bfa_assert(fcport->cfg.maxfrsize);
720 bfa_assert(pport->cfg.rx_bbcredit); 929 bfa_assert(fcport->cfg.rx_bbcredit);
721 bfa_assert(pport->speed_sup); 930 bfa_assert(fcport->speed_sup);
722} 931}
723 932
724static void 933static void
725bfa_pport_detach(struct bfa_s *bfa) 934bfa_fcport_detach(struct bfa_s *bfa)
726{ 935{
727} 936}
728 937
@@ -730,95 +939,97 @@ bfa_pport_detach(struct bfa_s *bfa)
730 * Called when IOC is ready. 939 * Called when IOC is ready.
731 */ 940 */
732static void 941static void
733bfa_pport_start(struct bfa_s *bfa) 942bfa_fcport_start(struct bfa_s *bfa)
734{ 943{
735 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START); 944 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
736} 945}
737 946
738/** 947/**
739 * Called before IOC is stopped. 948 * Called before IOC is stopped.
740 */ 949 */
741static void 950static void
742bfa_pport_stop(struct bfa_s *bfa) 951bfa_fcport_stop(struct bfa_s *bfa)
743{ 952{
744 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP); 953 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
745} 954}
746 955
747/** 956/**
748 * Called when IOC failure is detected. 957 * Called when IOC failure is detected.
749 */ 958 */
750static void 959static void
751bfa_pport_iocdisable(struct bfa_s *bfa) 960bfa_fcport_iocdisable(struct bfa_s *bfa)
752{ 961{
753 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL); 962 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
754} 963}
755 964
756static void 965static void
757bfa_pport_update_linkinfo(struct bfa_pport_s *pport) 966bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
758{ 967{
759 struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event; 968 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
760 969
761 pport->speed = pevent->link_state.speed; 970 fcport->speed = pevent->link_state.speed;
762 pport->topology = pevent->link_state.topology; 971 fcport->topology = pevent->link_state.topology;
763 972
764 if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP) 973 if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
765 pport->myalpa = pevent->link_state.tl.loop_info.myalpa; 974 fcport->myalpa =
975 pevent->link_state.tl.loop_info.myalpa;
766 976
767 /* 977 /*
768 * QoS Details 978 * QoS Details
769 */ 979 */
770 bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr); 980 bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
771 bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr); 981 bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
772 982
773 bfa_trc(pport->bfa, pport->speed); 983 bfa_trc(fcport->bfa, fcport->speed);
774 bfa_trc(pport->bfa, pport->topology); 984 bfa_trc(fcport->bfa, fcport->topology);
775} 985}
776 986
777static void 987static void
778bfa_pport_reset_linkinfo(struct bfa_pport_s *pport) 988bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
779{ 989{
780 pport->speed = BFA_PPORT_SPEED_UNKNOWN; 990 fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
781 pport->topology = BFA_PPORT_TOPOLOGY_NONE; 991 fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
782} 992}
783 993
784/** 994/**
785 * Send port enable message to firmware. 995 * Send port enable message to firmware.
786 */ 996 */
787static bfa_boolean_t 997static bfa_boolean_t
788bfa_pport_send_enable(struct bfa_pport_s *port) 998bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
789{ 999{
790 struct bfi_pport_enable_req_s *m; 1000 struct bfi_fcport_enable_req_s *m;
791 1001
792 /** 1002 /**
793 * Increment message tag before queue check, so that responses to old 1003 * Increment message tag before queue check, so that responses to old
794 * requests are discarded. 1004 * requests are discarded.
795 */ 1005 */
796 port->msgtag++; 1006 fcport->msgtag++;
797 1007
798 /** 1008 /**
799 * check for room in queue to send request now 1009 * check for room in queue to send request now
800 */ 1010 */
801 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1011 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
802 if (!m) { 1012 if (!m) {
803 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 1013 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1014 &fcport->reqq_wait);
804 return BFA_FALSE; 1015 return BFA_FALSE;
805 } 1016 }
806 1017
807 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ, 1018 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
808 bfa_lpuid(port->bfa)); 1019 bfa_lpuid(fcport->bfa));
809 m->nwwn = port->nwwn; 1020 m->nwwn = fcport->nwwn;
810 m->pwwn = port->pwwn; 1021 m->pwwn = fcport->pwwn;
811 m->port_cfg = port->cfg; 1022 m->port_cfg = fcport->cfg;
812 m->msgtag = port->msgtag; 1023 m->msgtag = fcport->msgtag;
813 m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize); 1024 m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
814 bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa); 1025 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
815 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo); 1026 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
816 bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi); 1027 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
817 1028
818 /** 1029 /**
819 * queue I/O message to firmware 1030 * queue I/O message to firmware
820 */ 1031 */
821 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1032 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
822 return BFA_TRUE; 1033 return BFA_TRUE;
823} 1034}
824 1035
@@ -826,74 +1037,226 @@ bfa_pport_send_enable(struct bfa_pport_s *port)
826 * Send port disable message to firmware. 1037 * Send port disable message to firmware.
827 */ 1038 */
828static bfa_boolean_t 1039static bfa_boolean_t
829bfa_pport_send_disable(struct bfa_pport_s *port) 1040bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
830{ 1041{
831 bfi_pport_disable_req_t *m; 1042 struct bfi_fcport_req_s *m;
832 1043
833 /** 1044 /**
834 * Increment message tag before queue check, so that responses to old 1045 * Increment message tag before queue check, so that responses to old
835 * requests are discarded. 1046 * requests are discarded.
836 */ 1047 */
837 port->msgtag++; 1048 fcport->msgtag++;
838 1049
839 /** 1050 /**
840 * check for room in queue to send request now 1051 * check for room in queue to send request now
841 */ 1052 */
842 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1053 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
843 if (!m) { 1054 if (!m) {
844 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); 1055 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1056 &fcport->reqq_wait);
845 return BFA_FALSE; 1057 return BFA_FALSE;
846 } 1058 }
847 1059
848 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ, 1060 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
849 bfa_lpuid(port->bfa)); 1061 bfa_lpuid(fcport->bfa));
850 m->msgtag = port->msgtag; 1062 m->msgtag = fcport->msgtag;
851 1063
852 /** 1064 /**
853 * queue I/O message to firmware 1065 * queue I/O message to firmware
854 */ 1066 */
855 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1067 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
856 1068
857 return BFA_TRUE; 1069 return BFA_TRUE;
858} 1070}
859 1071
860static void 1072static void
861bfa_pport_set_wwns(struct bfa_pport_s *port) 1073bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
862{ 1074{
863 port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc); 1075 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
864 port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc); 1076 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
865 1077
866 bfa_trc(port->bfa, port->pwwn); 1078 bfa_trc(fcport->bfa, fcport->pwwn);
867 bfa_trc(port->bfa, port->nwwn); 1079 bfa_trc(fcport->bfa, fcport->nwwn);
868} 1080}
869 1081
870static void 1082static void
871bfa_port_send_txcredit(void *port_cbarg) 1083bfa_fcport_send_txcredit(void *port_cbarg)
872{ 1084{
873 1085
874 struct bfa_pport_s *port = port_cbarg; 1086 struct bfa_fcport_s *fcport = port_cbarg;
875 struct bfi_pport_set_svc_params_req_s *m; 1087 struct bfi_fcport_set_svc_params_req_s *m;
876 1088
877 /** 1089 /**
878 * check for room in queue to send request now 1090 * check for room in queue to send request now
879 */ 1091 */
880 m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); 1092 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
881 if (!m) { 1093 if (!m) {
882 bfa_trc(port->bfa, port->cfg.tx_bbcredit); 1094 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
883 return; 1095 return;
884 } 1096 }
885 1097
886 bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ, 1098 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
887 bfa_lpuid(port->bfa)); 1099 bfa_lpuid(fcport->bfa));
888 m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit); 1100 m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
889 1101
890 /** 1102 /**
891 * queue I/O message to firmware 1103 * queue I/O message to firmware
892 */ 1104 */
893 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1105 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
894} 1106}
895 1107
1108static void
1109bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
1110 struct bfa_qos_stats_s *s)
1111{
1112 u32 *dip = (u32 *) d;
1113 u32 *sip = (u32 *) s;
1114 int i;
1115
1116 /* Now swap the 32 bit fields */
1117 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
1118 dip[i] = bfa_os_ntohl(sip[i]);
1119}
896 1120
1121static void
1122bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
1123 struct bfa_fcoe_stats_s *s)
1124{
1125 u32 *dip = (u32 *) d;
1126 u32 *sip = (u32 *) s;
1127 int i;
1128
1129 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
1130 i = i + 2) {
1131#ifdef __BIGENDIAN
1132 dip[i] = bfa_os_ntohl(sip[i]);
1133 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1134#else
1135 dip[i] = bfa_os_ntohl(sip[i + 1]);
1136 dip[i + 1] = bfa_os_ntohl(sip[i]);
1137#endif
1138 }
1139}
1140
1141static void
1142__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
1143{
1144 struct bfa_fcport_s *fcport = cbarg;
1145
1146 if (complete) {
1147 if (fcport->stats_status == BFA_STATUS_OK) {
1148
1149 /* Swap FC QoS or FCoE stats */
1150 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
1151 bfa_fcport_qos_stats_swap(
1152 &fcport->stats_ret->fcqos,
1153 &fcport->stats->fcqos);
1154 else
1155 bfa_fcport_fcoe_stats_swap(
1156 &fcport->stats_ret->fcoe,
1157 &fcport->stats->fcoe);
1158 }
1159 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1160 } else {
1161 fcport->stats_busy = BFA_FALSE;
1162 fcport->stats_status = BFA_STATUS_OK;
1163 }
1164}
1165
1166static void
1167bfa_fcport_stats_get_timeout(void *cbarg)
1168{
1169 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1170
1171 bfa_trc(fcport->bfa, fcport->stats_qfull);
1172
1173 if (fcport->stats_qfull) {
1174 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1175 fcport->stats_qfull = BFA_FALSE;
1176 }
1177
1178 fcport->stats_status = BFA_STATUS_ETIMER;
1179 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
1180 fcport);
1181}
1182
1183static void
1184bfa_fcport_send_stats_get(void *cbarg)
1185{
1186 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1187 struct bfi_fcport_req_s *msg;
1188
1189 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1190
1191 if (!msg) {
1192 fcport->stats_qfull = BFA_TRUE;
1193 bfa_reqq_winit(&fcport->stats_reqq_wait,
1194 bfa_fcport_send_stats_get, fcport);
1195 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1196 &fcport->stats_reqq_wait);
1197 return;
1198 }
1199 fcport->stats_qfull = BFA_FALSE;
1200
1201 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1202 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
1203 bfa_lpuid(fcport->bfa));
1204 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1205}
1206
1207static void
1208__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
1209{
1210 struct bfa_fcport_s *fcport = cbarg;
1211
1212 if (complete) {
1213 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
1214 } else {
1215 fcport->stats_busy = BFA_FALSE;
1216 fcport->stats_status = BFA_STATUS_OK;
1217 }
1218}
1219
1220static void
1221bfa_fcport_stats_clr_timeout(void *cbarg)
1222{
1223 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1224
1225 bfa_trc(fcport->bfa, fcport->stats_qfull);
1226
1227 if (fcport->stats_qfull) {
1228 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
1229 fcport->stats_qfull = BFA_FALSE;
1230 }
1231
1232 fcport->stats_status = BFA_STATUS_ETIMER;
1233 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
1234 __bfa_cb_fcport_stats_clr, fcport);
1235}
1236
1237static void
1238bfa_fcport_send_stats_clear(void *cbarg)
1239{
1240 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
1241 struct bfi_fcport_req_s *msg;
1242
1243 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
1244
1245 if (!msg) {
1246 fcport->stats_qfull = BFA_TRUE;
1247 bfa_reqq_winit(&fcport->stats_reqq_wait,
1248 bfa_fcport_send_stats_clear, fcport);
1249 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
1250 &fcport->stats_reqq_wait);
1251 return;
1252 }
1253 fcport->stats_qfull = BFA_FALSE;
1254
1255 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
1256 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
1257 bfa_lpuid(fcport->bfa));
1258 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
1259}
897 1260
898/** 1261/**
899 * bfa_pport_public 1262 * bfa_pport_public
@@ -903,32 +1266,32 @@ bfa_port_send_txcredit(void *port_cbarg)
903 * Firmware message handler. 1266 * Firmware message handler.
904 */ 1267 */
905void 1268void
906bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) 1269bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
907{ 1270{
908 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1271 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
909 union bfi_pport_i2h_msg_u i2hmsg; 1272 union bfi_fcport_i2h_msg_u i2hmsg;
910 1273
911 i2hmsg.msg = msg; 1274 i2hmsg.msg = msg;
912 pport->event_arg.i2hmsg = i2hmsg; 1275 fcport->event_arg.i2hmsg = i2hmsg;
913 1276
914 switch (msg->mhdr.msg_id) { 1277 switch (msg->mhdr.msg_id) {
915 case BFI_PPORT_I2H_ENABLE_RSP: 1278 case BFI_FCPORT_I2H_ENABLE_RSP:
916 if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1279 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
917 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 1280 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
918 break; 1281 break;
919 1282
920 case BFI_PPORT_I2H_DISABLE_RSP: 1283 case BFI_FCPORT_I2H_DISABLE_RSP:
921 if (pport->msgtag == i2hmsg.enable_rsp->msgtag) 1284 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
922 bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); 1285 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
923 break; 1286 break;
924 1287
925 case BFI_PPORT_I2H_EVENT: 1288 case BFI_FCPORT_I2H_EVENT:
926 switch (i2hmsg.event->link_state.linkstate) { 1289 switch (i2hmsg.event->link_state.linkstate) {
927 case BFA_PPORT_LINKUP: 1290 case BFA_PPORT_LINKUP:
928 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP); 1291 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
929 break; 1292 break;
930 case BFA_PPORT_LINKDOWN: 1293 case BFA_PPORT_LINKDOWN:
931 bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN); 1294 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
932 break; 1295 break;
933 case BFA_PPORT_TRUNK_LINKDOWN: 1296 case BFA_PPORT_TRUNK_LINKDOWN:
934 /** todo: event notification */ 1297 /** todo: event notification */
@@ -936,42 +1299,40 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
936 } 1299 }
937 break; 1300 break;
938 1301
939 case BFI_PPORT_I2H_GET_STATS_RSP: 1302 case BFI_FCPORT_I2H_STATS_GET_RSP:
940 case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
941 /* 1303 /*
942 * check for timer pop before processing the rsp 1304 * check for timer pop before processing the rsp
943 */ 1305 */
944 if (pport->stats_busy == BFA_FALSE 1306 if (fcport->stats_busy == BFA_FALSE ||
945 || pport->stats_status == BFA_STATUS_ETIMER) 1307 fcport->stats_status == BFA_STATUS_ETIMER)
946 break; 1308 break;
947 1309
948 bfa_timer_stop(&pport->timer); 1310 bfa_timer_stop(&fcport->timer);
949 pport->stats_status = i2hmsg.getstats_rsp->status; 1311 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
950 bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats, 1312 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
951 pport); 1313 __bfa_cb_fcport_stats_get, fcport);
952 break; 1314 break;
953 case BFI_PPORT_I2H_CLEAR_STATS_RSP: 1315
954 case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP: 1316 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
955 /* 1317 /*
956 * check for timer pop before processing the rsp 1318 * check for timer pop before processing the rsp
957 */ 1319 */
958 if (pport->stats_busy == BFA_FALSE 1320 if (fcport->stats_busy == BFA_FALSE ||
959 || pport->stats_status == BFA_STATUS_ETIMER) 1321 fcport->stats_status == BFA_STATUS_ETIMER)
960 break; 1322 break;
961 1323
962 bfa_timer_stop(&pport->timer); 1324 bfa_timer_stop(&fcport->timer);
963 pport->stats_status = BFA_STATUS_OK; 1325 fcport->stats_status = BFA_STATUS_OK;
964 bfa_cb_queue(pport->bfa, &pport->hcb_qe, 1326 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
965 __bfa_cb_port_stats_clr, pport); 1327 __bfa_cb_fcport_stats_clr, fcport);
966 break; 1328 break;
967 1329
968 default: 1330 default:
969 bfa_assert(0); 1331 bfa_assert(0);
1332 break;
970 } 1333 }
971} 1334}
972 1335
973
974
975/** 1336/**
976 * bfa_pport_api 1337 * bfa_pport_api
977 */ 1338 */
@@ -980,35 +1341,35 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
980 * Registered callback for port events. 1341 * Registered callback for port events.
981 */ 1342 */
982void 1343void
983bfa_pport_event_register(struct bfa_s *bfa, 1344bfa_fcport_event_register(struct bfa_s *bfa,
984 void (*cbfn) (void *cbarg, bfa_pport_event_t event), 1345 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
985 void *cbarg) 1346 void *cbarg)
986{ 1347{
987 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1348 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
988 1349
989 pport->event_cbfn = cbfn; 1350 fcport->event_cbfn = cbfn;
990 pport->event_cbarg = cbarg; 1351 fcport->event_cbarg = cbarg;
991} 1352}
992 1353
993bfa_status_t 1354bfa_status_t
994bfa_pport_enable(struct bfa_s *bfa) 1355bfa_fcport_enable(struct bfa_s *bfa)
995{ 1356{
996 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1357 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
997 1358
998 if (pport->diag_busy) 1359 if (fcport->diag_busy)
999 return BFA_STATUS_DIAG_BUSY; 1360 return BFA_STATUS_DIAG_BUSY;
1000 else if (bfa_sm_cmp_state 1361 else if (bfa_sm_cmp_state
1001 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) 1362 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
1002 return BFA_STATUS_DEVBUSY; 1363 return BFA_STATUS_DEVBUSY;
1003 1364
1004 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); 1365 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
1005 return BFA_STATUS_OK; 1366 return BFA_STATUS_OK;
1006} 1367}
1007 1368
1008bfa_status_t 1369bfa_status_t
1009bfa_pport_disable(struct bfa_s *bfa) 1370bfa_fcport_disable(struct bfa_s *bfa)
1010{ 1371{
1011 bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE); 1372 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
1012 return BFA_STATUS_OK; 1373 return BFA_STATUS_OK;
1013} 1374}
1014 1375
@@ -1016,18 +1377,18 @@ bfa_pport_disable(struct bfa_s *bfa)
1016 * Configure port speed. 1377 * Configure port speed.
1017 */ 1378 */
1018bfa_status_t 1379bfa_status_t
1019bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1380bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1020{ 1381{
1021 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1382 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1022 1383
1023 bfa_trc(bfa, speed); 1384 bfa_trc(bfa, speed);
1024 1385
1025 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) { 1386 if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
1026 bfa_trc(bfa, pport->speed_sup); 1387 bfa_trc(bfa, fcport->speed_sup);
1027 return BFA_STATUS_UNSUPP_SPEED; 1388 return BFA_STATUS_UNSUPP_SPEED;
1028 } 1389 }
1029 1390
1030 pport->cfg.speed = speed; 1391 fcport->cfg.speed = speed;
1031 1392
1032 return BFA_STATUS_OK; 1393 return BFA_STATUS_OK;
1033} 1394}
@@ -1036,23 +1397,23 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1036 * Get current speed. 1397 * Get current speed.
1037 */ 1398 */
1038enum bfa_pport_speed 1399enum bfa_pport_speed
1039bfa_pport_get_speed(struct bfa_s *bfa) 1400bfa_fcport_get_speed(struct bfa_s *bfa)
1040{ 1401{
1041 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1402 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1042 1403
1043 return port->speed; 1404 return fcport->speed;
1044} 1405}
1045 1406
1046/** 1407/**
1047 * Configure port topology. 1408 * Configure port topology.
1048 */ 1409 */
1049bfa_status_t 1410bfa_status_t
1050bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) 1411bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1051{ 1412{
1052 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1413 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1053 1414
1054 bfa_trc(bfa, topology); 1415 bfa_trc(bfa, topology);
1055 bfa_trc(bfa, pport->cfg.topology); 1416 bfa_trc(bfa, fcport->cfg.topology);
1056 1417
1057 switch (topology) { 1418 switch (topology) {
1058 case BFA_PPORT_TOPOLOGY_P2P: 1419 case BFA_PPORT_TOPOLOGY_P2P:
@@ -1064,7 +1425,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1064 return BFA_STATUS_EINVAL; 1425 return BFA_STATUS_EINVAL;
1065 } 1426 }
1066 1427
1067 pport->cfg.topology = topology; 1428 fcport->cfg.topology = topology;
1068 return BFA_STATUS_OK; 1429 return BFA_STATUS_OK;
1069} 1430}
1070 1431
@@ -1072,64 +1433,64 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
1072 * Get current topology. 1433 * Get current topology.
1073 */ 1434 */
1074enum bfa_pport_topology 1435enum bfa_pport_topology
1075bfa_pport_get_topology(struct bfa_s *bfa) 1436bfa_fcport_get_topology(struct bfa_s *bfa)
1076{ 1437{
1077 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1438 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1078 1439
1079 return port->topology; 1440 return fcport->topology;
1080} 1441}
1081 1442
1082bfa_status_t 1443bfa_status_t
1083bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) 1444bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
1084{ 1445{
1085 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1446 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1086 1447
1087 bfa_trc(bfa, alpa); 1448 bfa_trc(bfa, alpa);
1088 bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1449 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1089 bfa_trc(bfa, pport->cfg.hardalpa); 1450 bfa_trc(bfa, fcport->cfg.hardalpa);
1090 1451
1091 pport->cfg.cfg_hardalpa = BFA_TRUE; 1452 fcport->cfg.cfg_hardalpa = BFA_TRUE;
1092 pport->cfg.hardalpa = alpa; 1453 fcport->cfg.hardalpa = alpa;
1093 1454
1094 return BFA_STATUS_OK; 1455 return BFA_STATUS_OK;
1095} 1456}
1096 1457
1097bfa_status_t 1458bfa_status_t
1098bfa_pport_clr_hardalpa(struct bfa_s *bfa) 1459bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
1099{ 1460{
1100 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1461 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1101 1462
1102 bfa_trc(bfa, pport->cfg.cfg_hardalpa); 1463 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
1103 bfa_trc(bfa, pport->cfg.hardalpa); 1464 bfa_trc(bfa, fcport->cfg.hardalpa);
1104 1465
1105 pport->cfg.cfg_hardalpa = BFA_FALSE; 1466 fcport->cfg.cfg_hardalpa = BFA_FALSE;
1106 return BFA_STATUS_OK; 1467 return BFA_STATUS_OK;
1107} 1468}
1108 1469
1109bfa_boolean_t 1470bfa_boolean_t
1110bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) 1471bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
1111{ 1472{
1112 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1473 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1113 1474
1114 *alpa = port->cfg.hardalpa; 1475 *alpa = fcport->cfg.hardalpa;
1115 return port->cfg.cfg_hardalpa; 1476 return fcport->cfg.cfg_hardalpa;
1116} 1477}
1117 1478
1118u8 1479u8
1119bfa_pport_get_myalpa(struct bfa_s *bfa) 1480bfa_fcport_get_myalpa(struct bfa_s *bfa)
1120{ 1481{
1121 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1482 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1122 1483
1123 return port->myalpa; 1484 return fcport->myalpa;
1124} 1485}
1125 1486
1126bfa_status_t 1487bfa_status_t
1127bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) 1488bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1128{ 1489{
1129 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1490 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1130 1491
1131 bfa_trc(bfa, maxfrsize); 1492 bfa_trc(bfa, maxfrsize);
1132 bfa_trc(bfa, pport->cfg.maxfrsize); 1493 bfa_trc(bfa, fcport->cfg.maxfrsize);
1133 1494
1134 /* 1495 /*
1135 * with in range 1496 * with in range
@@ -1143,41 +1504,41 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
1143 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) 1504 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
1144 return BFA_STATUS_INVLD_DFSZ; 1505 return BFA_STATUS_INVLD_DFSZ;
1145 1506
1146 pport->cfg.maxfrsize = maxfrsize; 1507 fcport->cfg.maxfrsize = maxfrsize;
1147 return BFA_STATUS_OK; 1508 return BFA_STATUS_OK;
1148} 1509}
1149 1510
1150u16 1511u16
1151bfa_pport_get_maxfrsize(struct bfa_s *bfa) 1512bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
1152{ 1513{
1153 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1514 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1154 1515
1155 return port->cfg.maxfrsize; 1516 return fcport->cfg.maxfrsize;
1156} 1517}
1157 1518
1158u32 1519u32
1159bfa_pport_mypid(struct bfa_s *bfa) 1520bfa_fcport_mypid(struct bfa_s *bfa)
1160{ 1521{
1161 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1522 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1162 1523
1163 return port->mypid; 1524 return fcport->mypid;
1164} 1525}
1165 1526
1166u8 1527u8
1167bfa_pport_get_rx_bbcredit(struct bfa_s *bfa) 1528bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
1168{ 1529{
1169 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1530 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1170 1531
1171 return port->cfg.rx_bbcredit; 1532 return fcport->cfg.rx_bbcredit;
1172} 1533}
1173 1534
1174void 1535void
1175bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) 1536bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1176{ 1537{
1177 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1538 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1178 1539
1179 port->cfg.tx_bbcredit = (u8) tx_bbcredit; 1540 fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
1180 bfa_port_send_txcredit(port); 1541 bfa_fcport_send_txcredit(fcport);
1181} 1542}
1182 1543
1183/** 1544/**
@@ -1185,302 +1546,192 @@ bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
1185 */ 1546 */
1186 1547
1187wwn_t 1548wwn_t
1188bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) 1549bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
1189{ 1550{
1190 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1551 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1191 if (node) 1552 if (node)
1192 return pport->nwwn; 1553 return fcport->nwwn;
1193 else 1554 else
1194 return pport->pwwn; 1555 return fcport->pwwn;
1195} 1556}
1196 1557
1197void 1558void
1198bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) 1559bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
1199{ 1560{
1200 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1201 1562
1202 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); 1563 bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
1203 1564
1204 attr->nwwn = pport->nwwn; 1565 attr->nwwn = fcport->nwwn;
1205 attr->pwwn = pport->pwwn; 1566 attr->pwwn = fcport->pwwn;
1206 1567
1207 bfa_os_memcpy(&attr->pport_cfg, &pport->cfg, 1568 bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
1208 sizeof(struct bfa_pport_cfg_s)); 1569 sizeof(struct bfa_pport_cfg_s));
1209 /* 1570 /*
1210 * speed attributes 1571 * speed attributes
1211 */ 1572 */
1212 attr->pport_cfg.speed = pport->cfg.speed; 1573 attr->pport_cfg.speed = fcport->cfg.speed;
1213 attr->speed_supported = pport->speed_sup; 1574 attr->speed_supported = fcport->speed_sup;
1214 attr->speed = pport->speed; 1575 attr->speed = fcport->speed;
1215 attr->cos_supported = FC_CLASS_3; 1576 attr->cos_supported = FC_CLASS_3;
1216 1577
1217 /* 1578 /*
1218 * topology attributes 1579 * topology attributes
1219 */ 1580 */
1220 attr->pport_cfg.topology = pport->cfg.topology; 1581 attr->pport_cfg.topology = fcport->cfg.topology;
1221 attr->topology = pport->topology; 1582 attr->topology = fcport->topology;
1222 1583
1223 /* 1584 /*
1224 * beacon attributes 1585 * beacon attributes
1225 */ 1586 */
1226 attr->beacon = pport->beacon; 1587 attr->beacon = fcport->beacon;
1227 attr->link_e2e_beacon = pport->link_e2e_beacon; 1588 attr->link_e2e_beacon = fcport->link_e2e_beacon;
1228 attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog); 1589 attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
1229 1590
1230 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); 1591 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
1231 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); 1592 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
1232 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm); 1593 attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
1233 if (bfa_ioc_is_disabled(&pport->bfa->ioc)) 1594 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
1234 attr->port_state = BFA_PPORT_ST_IOCDIS; 1595 attr->port_state = BFA_PPORT_ST_IOCDIS;
1235 else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc)) 1596 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
1236 attr->port_state = BFA_PPORT_ST_FWMISMATCH; 1597 attr->port_state = BFA_PPORT_ST_FWMISMATCH;
1237} 1598}
1238 1599
1239static void 1600#define BFA_FCPORT_STATS_TOV 1000
1240bfa_port_stats_query(void *cbarg)
1241{
1242 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1243 bfi_pport_get_stats_req_t *msg;
1244
1245 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1246
1247 if (!msg) {
1248 port->stats_qfull = BFA_TRUE;
1249 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
1250 port);
1251 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1252 return;
1253 }
1254 port->stats_qfull = BFA_FALSE;
1255
1256 bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
1257 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
1258 bfa_lpuid(port->bfa));
1259 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1260
1261 return;
1262}
1263
1264static void
1265bfa_port_stats_clear(void *cbarg)
1266{
1267 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1268 bfi_pport_clear_stats_req_t *msg;
1269
1270 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1271 1601
1272 if (!msg) { 1602/**
1273 port->stats_qfull = BFA_TRUE; 1603 * Fetch port attributes (FCQoS or FCoE).
1274 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear, 1604 */
1275 port); 1605bfa_status_t
1276 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); 1606bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1277 return; 1607 bfa_cb_pport_t cbfn, void *cbarg)
1278 }
1279 port->stats_qfull = BFA_FALSE;
1280
1281 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
1282 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
1283 bfa_lpuid(port->bfa));
1284 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
1285 return;
1286}
1287
1288static void
1289bfa_port_qos_stats_clear(void *cbarg)
1290{ 1608{
1291 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1609 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1292 bfi_pport_clear_qos_stats_req_t *msg;
1293
1294 msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
1295 1610
1296 if (!msg) { 1611 if (fcport->stats_busy) {
1297 port->stats_qfull = BFA_TRUE; 1612 bfa_trc(bfa, fcport->stats_busy);
1298 bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear, 1613 return BFA_STATUS_DEVBUSY;
1299 port);
1300 bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
1301 return;
1302 } 1614 }
1303 port->stats_qfull = BFA_FALSE;
1304 1615
1305 bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t)); 1616 fcport->stats_busy = BFA_TRUE;
1306 bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, 1617 fcport->stats_ret = stats;
1307 bfa_lpuid(port->bfa)); 1618 fcport->stats_cbfn = cbfn;
1308 bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); 1619 fcport->stats_cbarg = cbarg;
1309 return;
1310}
1311
1312static void
1313bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
1314{
1315 u32 *dip = (u32 *) d;
1316 u32 *sip = (u32 *) s;
1317 int i;
1318 1620
1319 /* 1621 bfa_fcport_send_stats_get(fcport);
1320 * Do 64 bit fields swap first
1321 */
1322 for (i = 0;
1323 i <
1324 ((sizeof(union bfa_pport_stats_u) -
1325 sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
1326#ifdef __BIGENDIAN
1327 dip[i] = bfa_os_ntohl(sip[i]);
1328 dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
1329#else
1330 dip[i] = bfa_os_ntohl(sip[i + 1]);
1331 dip[i + 1] = bfa_os_ntohl(sip[i]);
1332#endif
1333 }
1334 1622
1335 /* 1623 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
1336 * Now swap the 32 bit fields 1624 fcport, BFA_FCPORT_STATS_TOV);
1337 */ 1625 return BFA_STATUS_OK;
1338 for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
1339 dip[i] = bfa_os_ntohl(sip[i]);
1340} 1626}
1341 1627
1342static void 1628/**
1343__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete) 1629 * Reset port statistics (FCQoS or FCoE).
1630 */
1631bfa_status_t
1632bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1344{ 1633{
1345 struct bfa_pport_s *port = cbarg; 1634 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1346 1635
1347 if (complete) { 1636 if (fcport->stats_busy) {
1348 port->stats_cbfn(port->stats_cbarg, port->stats_status); 1637 bfa_trc(bfa, fcport->stats_busy);
1349 } else { 1638 return BFA_STATUS_DEVBUSY;
1350 port->stats_busy = BFA_FALSE;
1351 port->stats_status = BFA_STATUS_OK;
1352 } 1639 }
1353}
1354
1355static void
1356bfa_port_stats_clr_timeout(void *cbarg)
1357{
1358 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
1359 1640
1360 bfa_trc(port->bfa, port->stats_qfull); 1641 fcport->stats_busy = BFA_TRUE;
1642 fcport->stats_cbfn = cbfn;
1643 fcport->stats_cbarg = cbarg;
1361 1644
1362 if (port->stats_qfull) { 1645 bfa_fcport_send_stats_clear(fcport);
1363 bfa_reqq_wcancel(&port->stats_reqq_wait);
1364 port->stats_qfull = BFA_FALSE;
1365 }
1366 1646
1367 port->stats_status = BFA_STATUS_ETIMER; 1647 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
1368 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port); 1648 fcport, BFA_FCPORT_STATS_TOV);
1649 return BFA_STATUS_OK;
1369} 1650}
1370 1651
1371static void 1652/**
1372__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) 1653 * Fetch FCQoS port statistics
1654 */
1655bfa_status_t
1656bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1657 bfa_cb_pport_t cbfn, void *cbarg)
1373{ 1658{
1374 struct bfa_pport_s *port = cbarg; 1659 /* Meaningful only for FC mode */
1660 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1375 1661
1376 if (complete) { 1662 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1377 if (port->stats_status == BFA_STATUS_OK)
1378 bfa_pport_stats_swap(port->stats_ret, port->stats);
1379 port->stats_cbfn(port->stats_cbarg, port->stats_status);
1380 } else {
1381 port->stats_busy = BFA_FALSE;
1382 port->stats_status = BFA_STATUS_OK;
1383 }
1384} 1663}
1385 1664
1386static void 1665/**
1387bfa_port_stats_timeout(void *cbarg) 1666 * Reset FCoE port statistics
1667 */
1668bfa_status_t
1669bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1388{ 1670{
1389 struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; 1671 /* Meaningful only for FC mode */
1390 1672 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
1391 bfa_trc(port->bfa, port->stats_qfull);
1392 1673
1393 if (port->stats_qfull) { 1674 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1394 bfa_reqq_wcancel(&port->stats_reqq_wait);
1395 port->stats_qfull = BFA_FALSE;
1396 }
1397
1398 port->stats_status = BFA_STATUS_ETIMER;
1399 bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
1400} 1675}
1401 1676
1402#define BFA_PORT_STATS_TOV 1000
1403
1404/** 1677/**
1405 * Fetch port attributes. 1678 * Fetch FCQoS port statistics
1406 */ 1679 */
1407bfa_status_t 1680bfa_status_t
1408bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, 1681bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
1409 bfa_cb_pport_t cbfn, void *cbarg) 1682 bfa_cb_pport_t cbfn, void *cbarg)
1410{ 1683{
1411 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1684 /* Meaningful only for FCoE mode */
1412 1685 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1413 if (port->stats_busy) {
1414 bfa_trc(bfa, port->stats_busy);
1415 return BFA_STATUS_DEVBUSY;
1416 }
1417
1418 port->stats_busy = BFA_TRUE;
1419 port->stats_ret = stats;
1420 port->stats_cbfn = cbfn;
1421 port->stats_cbarg = cbarg;
1422
1423 bfa_port_stats_query(port);
1424 1686
1425 bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, 1687 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
1426 BFA_PORT_STATS_TOV);
1427 return BFA_STATUS_OK;
1428} 1688}
1429 1689
1690/**
1691 * Reset FCoE port statistics
1692 */
1430bfa_status_t 1693bfa_status_t
1431bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) 1694bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1432{ 1695{
1433 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1696 /* Meaningful only for FCoE mode */
1434 1697 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
1435 if (port->stats_busy) {
1436 bfa_trc(bfa, port->stats_busy);
1437 return BFA_STATUS_DEVBUSY;
1438 }
1439
1440 port->stats_busy = BFA_TRUE;
1441 port->stats_cbfn = cbfn;
1442 port->stats_cbarg = cbarg;
1443
1444 bfa_port_stats_clear(port);
1445 1698
1446 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, 1699 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
1447 BFA_PORT_STATS_TOV);
1448 return BFA_STATUS_OK;
1449} 1700}
1450 1701
1451bfa_status_t 1702bfa_status_t
1452bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap) 1703bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
1453{ 1704{
1454 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1705 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1455 1706
1456 bfa_trc(bfa, bitmap); 1707 bfa_trc(bfa, bitmap);
1457 bfa_trc(bfa, pport->cfg.trunked); 1708 bfa_trc(bfa, fcport->cfg.trunked);
1458 bfa_trc(bfa, pport->cfg.trunk_ports); 1709 bfa_trc(bfa, fcport->cfg.trunk_ports);
1459 1710
1460 if (!bitmap || (bitmap & (bitmap - 1))) 1711 if (!bitmap || (bitmap & (bitmap - 1)))
1461 return BFA_STATUS_EINVAL; 1712 return BFA_STATUS_EINVAL;
1462 1713
1463 pport->cfg.trunked = BFA_TRUE; 1714 fcport->cfg.trunked = BFA_TRUE;
1464 pport->cfg.trunk_ports = bitmap; 1715 fcport->cfg.trunk_ports = bitmap;
1465 1716
1466 return BFA_STATUS_OK; 1717 return BFA_STATUS_OK;
1467} 1718}
1468 1719
1469void 1720void
1470bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) 1721bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
1471{ 1722{
1472 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1723 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1473 1724
1474 qos_attr->state = bfa_os_ntohl(pport->qos_attr.state); 1725 qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
1475 qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr); 1726 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
1476} 1727}
1477 1728
1478void 1729void
1479bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 1730bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
1480 struct bfa_qos_vc_attr_s *qos_vc_attr) 1731 struct bfa_qos_vc_attr_s *qos_vc_attr)
1481{ 1732{
1482 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1733 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1483 struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr; 1734 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
1484 u32 i = 0; 1735 u32 i = 0;
1485 1736
1486 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); 1737 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
@@ -1503,119 +1754,89 @@ bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
1503} 1754}
1504 1755
1505/** 1756/**
1506 * Fetch QoS Stats.
1507 */
1508bfa_status_t
1509bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
1510 bfa_cb_pport_t cbfn, void *cbarg)
1511{
1512 /*
1513 * QoS stats is embedded in port stats
1514 */
1515 return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
1516}
1517
1518bfa_status_t
1519bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
1520{
1521 struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
1522
1523 if (port->stats_busy) {
1524 bfa_trc(bfa, port->stats_busy);
1525 return BFA_STATUS_DEVBUSY;
1526 }
1527
1528 port->stats_busy = BFA_TRUE;
1529 port->stats_cbfn = cbfn;
1530 port->stats_cbarg = cbarg;
1531
1532 bfa_port_qos_stats_clear(port);
1533
1534 bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
1535 BFA_PORT_STATS_TOV);
1536 return BFA_STATUS_OK;
1537}
1538
1539/**
1540 * Fetch port attributes. 1757 * Fetch port attributes.
1541 */ 1758 */
1542bfa_status_t 1759bfa_status_t
1543bfa_pport_trunk_disable(struct bfa_s *bfa) 1760bfa_fcport_trunk_disable(struct bfa_s *bfa)
1544{ 1761{
1545 return BFA_STATUS_OK; 1762 return BFA_STATUS_OK;
1546} 1763}
1547 1764
1548bfa_boolean_t 1765bfa_boolean_t
1549bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap) 1766bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
1550{ 1767{
1551 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1768 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1552 1769
1553 *bitmap = port->cfg.trunk_ports; 1770 *bitmap = fcport->cfg.trunk_ports;
1554 return port->cfg.trunked; 1771 return fcport->cfg.trunked;
1555} 1772}
1556 1773
1557bfa_boolean_t 1774bfa_boolean_t
1558bfa_pport_is_disabled(struct bfa_s *bfa) 1775bfa_fcport_is_disabled(struct bfa_s *bfa)
1559{ 1776{
1560 struct bfa_pport_s *port = BFA_PORT_MOD(bfa); 1777 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1561 1778
1562 return bfa_sm_to_state(hal_pport_sm_table, port->sm) == 1779 return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
1563 BFA_PPORT_ST_DISABLED; 1780 BFA_PPORT_ST_DISABLED;
1564 1781
1565} 1782}
1566 1783
1567bfa_boolean_t 1784bfa_boolean_t
1568bfa_pport_is_ratelim(struct bfa_s *bfa) 1785bfa_fcport_is_ratelim(struct bfa_s *bfa)
1569{ 1786{
1570 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1787 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1571 1788
1572 return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; 1789 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
1573 1790
1574} 1791}
1575 1792
1576void 1793void
1577bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) 1794bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
1578{ 1795{
1579 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1796 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1797 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
1580 1798
1581 bfa_trc(bfa, on_off); 1799 bfa_trc(bfa, on_off);
1582 bfa_trc(bfa, pport->cfg.qos_enabled); 1800 bfa_trc(bfa, fcport->cfg.qos_enabled);
1801
1802 bfa_trc(bfa, ioc_type);
1583 1803
1584 pport->cfg.qos_enabled = on_off; 1804 if (ioc_type == BFA_IOC_TYPE_FC)
1805 fcport->cfg.qos_enabled = on_off;
1585} 1806}
1586 1807
1587void 1808void
1588bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) 1809bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
1589{ 1810{
1590 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1811 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1591 1812
1592 bfa_trc(bfa, on_off); 1813 bfa_trc(bfa, on_off);
1593 bfa_trc(bfa, pport->cfg.ratelimit); 1814 bfa_trc(bfa, fcport->cfg.ratelimit);
1594 1815
1595 pport->cfg.ratelimit = on_off; 1816 fcport->cfg.ratelimit = on_off;
1596 if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) 1817 if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
1597 pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; 1818 fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
1598} 1819}
1599 1820
1600/** 1821/**
1601 * Configure default minimum ratelim speed 1822 * Configure default minimum ratelim speed
1602 */ 1823 */
1603bfa_status_t 1824bfa_status_t
1604bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) 1825bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1605{ 1826{
1606 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1827 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1607 1828
1608 bfa_trc(bfa, speed); 1829 bfa_trc(bfa, speed);
1609 1830
1610 /* 1831 /*
1611 * Auto and speeds greater than the supported speed, are invalid 1832 * Auto and speeds greater than the supported speed, are invalid
1612 */ 1833 */
1613 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) { 1834 if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
1614 bfa_trc(bfa, pport->speed_sup); 1835 bfa_trc(bfa, fcport->speed_sup);
1615 return BFA_STATUS_UNSUPP_SPEED; 1836 return BFA_STATUS_UNSUPP_SPEED;
1616 } 1837 }
1617 1838
1618 pport->cfg.trl_def_speed = speed; 1839 fcport->cfg.trl_def_speed = speed;
1619 1840
1620 return BFA_STATUS_OK; 1841 return BFA_STATUS_OK;
1621} 1842}
@@ -1624,45 +1845,45 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
1624 * Get default minimum ratelim speed 1845 * Get default minimum ratelim speed
1625 */ 1846 */
1626enum bfa_pport_speed 1847enum bfa_pport_speed
1627bfa_pport_get_ratelim_speed(struct bfa_s *bfa) 1848bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
1628{ 1849{
1629 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1850 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1630 1851
1631 bfa_trc(bfa, pport->cfg.trl_def_speed); 1852 bfa_trc(bfa, fcport->cfg.trl_def_speed);
1632 return pport->cfg.trl_def_speed; 1853 return fcport->cfg.trl_def_speed;
1633 1854
1634} 1855}
1635 1856
1636void 1857void
1637bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status) 1858bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
1638{ 1859{
1639 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1860 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1640 1861
1641 bfa_trc(bfa, status); 1862 bfa_trc(bfa, status);
1642 bfa_trc(bfa, pport->diag_busy); 1863 bfa_trc(bfa, fcport->diag_busy);
1643 1864
1644 pport->diag_busy = status; 1865 fcport->diag_busy = status;
1645} 1866}
1646 1867
1647void 1868void
1648bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 1869bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
1649 bfa_boolean_t link_e2e_beacon) 1870 bfa_boolean_t link_e2e_beacon)
1650{ 1871{
1651 struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); 1872 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
1652 1873
1653 bfa_trc(bfa, beacon); 1874 bfa_trc(bfa, beacon);
1654 bfa_trc(bfa, link_e2e_beacon); 1875 bfa_trc(bfa, link_e2e_beacon);
1655 bfa_trc(bfa, pport->beacon); 1876 bfa_trc(bfa, fcport->beacon);
1656 bfa_trc(bfa, pport->link_e2e_beacon); 1877 bfa_trc(bfa, fcport->link_e2e_beacon);
1657 1878
1658 pport->beacon = beacon; 1879 fcport->beacon = beacon;
1659 pport->link_e2e_beacon = link_e2e_beacon; 1880 fcport->link_e2e_beacon = link_e2e_beacon;
1660} 1881}
1661 1882
1662bfa_boolean_t 1883bfa_boolean_t
1663bfa_pport_is_linkup(struct bfa_s *bfa) 1884bfa_fcport_is_linkup(struct bfa_s *bfa)
1664{ 1885{
1665 return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup); 1886 return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
1666} 1887}
1667 1888
1668 1889
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 7cb39a306ea9..3516172c597c 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -36,6 +36,7 @@
36 * FCS sub-modules 36 * FCS sub-modules
37 */ 37 */
38struct bfa_fcs_mod_s { 38struct bfa_fcs_mod_s {
39 void (*attach) (struct bfa_fcs_s *fcs);
39 void (*modinit) (struct bfa_fcs_s *fcs); 40 void (*modinit) (struct bfa_fcs_s *fcs);
40 void (*modexit) (struct bfa_fcs_s *fcs); 41 void (*modexit) (struct bfa_fcs_s *fcs);
41}; 42};
@@ -43,12 +44,10 @@ struct bfa_fcs_mod_s {
43#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } 44#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
44 45
45static struct bfa_fcs_mod_s fcs_modules[] = { 46static struct bfa_fcs_mod_s fcs_modules[] = {
46 BFA_FCS_MODULE(bfa_fcs_pport), 47 { bfa_fcs_pport_attach, NULL, NULL },
47 BFA_FCS_MODULE(bfa_fcs_uf), 48 { bfa_fcs_uf_attach, NULL, NULL },
48 BFA_FCS_MODULE(bfa_fcs_fabric), 49 { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
49 BFA_FCS_MODULE(bfa_fcs_vport), 50 bfa_fcs_fabric_modexit },
50 BFA_FCS_MODULE(bfa_fcs_rport),
51 BFA_FCS_MODULE(bfa_fcs_fcpim),
52}; 51};
53 52
54/** 53/**
@@ -71,16 +70,10 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
71 */ 70 */
72 71
73/** 72/**
74 * FCS instance initialization. 73 * fcs attach -- called once to initialize data structures at driver attach time
75 *
76 * param[in] fcs FCS instance
77 * param[in] bfa BFA instance
78 * param[in] bfad BFA driver instance
79 *
80 * return None
81 */ 74 */
82void 75void
83bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 76bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
84 bfa_boolean_t min_cfg) 77 bfa_boolean_t min_cfg)
85{ 78{
86 int i; 79 int i;
@@ -95,7 +88,24 @@ bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
95 88
96 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { 89 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
97 mod = &fcs_modules[i]; 90 mod = &fcs_modules[i];
98 mod->modinit(fcs); 91 if (mod->attach)
92 mod->attach(fcs);
93 }
94}
95
96/**
97 * fcs initialization, called once after bfa initialization is complete
98 */
99void
100bfa_fcs_init(struct bfa_fcs_s *fcs)
101{
102 int i;
103 struct bfa_fcs_mod_s *mod;
104
105 for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
106 mod = &fcs_modules[i];
107 if (mod->modinit)
108 mod->modinit(fcs);
99 } 109 }
100} 110}
101 111
@@ -127,6 +137,23 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
127} 137}
128 138
129/** 139/**
140 * @brief
141 * FCS FDMI Driver Parameter Initialization
142 *
143 * @param[in] fcs FCS instance
144 * @param[in] fdmi_enable TRUE/FALSE
145 *
146 * @return None
147 */
148void
149bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
150{
151
152 fcs->fdmi_enabled = fdmi_enable;
153
154}
155
156/**
130 * FCS instance cleanup and exit. 157 * FCS instance cleanup and exit.
131 * 158 *
132 * param[in] fcs FCS instance 159 * param[in] fcs FCS instance
@@ -143,10 +170,12 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
143 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); 170 nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
144 171
145 for (i = 0; i < nmods; i++) { 172 for (i = 0; i < nmods; i++) {
146 bfa_wc_up(&fcs->wc);
147 173
148 mod = &fcs_modules[i]; 174 mod = &fcs_modules[i];
149 mod->modexit(fcs); 175 if (mod->modexit) {
176 bfa_wc_up(&fcs->wc);
177 mod->modexit(fcs);
178 }
150 } 179 }
151 180
152 bfa_wc_wait(&fcs->wc); 181 bfa_wc_wait(&fcs->wc);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index c7ab257f10a7..7c1251c682d8 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -114,7 +114,7 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
114 break; 114 break;
115 115
116 default: 116 default:
117 bfa_assert(0); 117 bfa_sm_fault(port->fcs, event);
118 } 118 }
119} 119}
120 120
@@ -136,7 +136,7 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
136 break; 136 break;
137 137
138 default: 138 default:
139 bfa_assert(0); 139 bfa_sm_fault(port->fcs, event);
140 } 140 }
141} 141}
142 142
@@ -176,7 +176,7 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
176 break; 176 break;
177 177
178 default: 178 default:
179 bfa_assert(0); 179 bfa_sm_fault(port->fcs, event);
180 } 180 }
181} 181}
182 182
@@ -214,7 +214,7 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
214 break; 214 break;
215 215
216 default: 216 default:
217 bfa_assert(0); 217 bfa_sm_fault(port->fcs, event);
218 } 218 }
219} 219}
220 220
@@ -234,7 +234,7 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
234 break; 234 break;
235 235
236 default: 236 default:
237 bfa_assert(0); 237 bfa_sm_fault(port->fcs, event);
238 } 238 }
239} 239}
240 240
@@ -263,30 +263,8 @@ bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
263 263
264 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 264 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
265 265
266 switch (event) { 266 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
267 case BFA_LPORT_AEN_ONLINE: 267 role_str[role/2]);
268 bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
269 role_str[role / 2]);
270 break;
271 case BFA_LPORT_AEN_OFFLINE:
272 bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
273 role_str[role / 2]);
274 break;
275 case BFA_LPORT_AEN_NEW:
276 bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
277 role_str[role / 2]);
278 break;
279 case BFA_LPORT_AEN_DELETE:
280 bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
281 role_str[role / 2]);
282 break;
283 case BFA_LPORT_AEN_DISCONNECT:
284 bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
285 role_str[role / 2]);
286 break;
287 default:
288 break;
289 }
290 268
291 aen_data.lport.vf_id = port->fabric->vf_id; 269 aen_data.lport.vf_id = port->fabric->vf_id;
292 aen_data.lport.roles = role; 270 aen_data.lport.roles = role;
@@ -873,36 +851,46 @@ bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
873} 851}
874 852
875/** 853/**
876 * Logical port initialization of base or virtual port. 854 * Attach time initialization of logical ports.
877 * Called by fabric for base port or by vport for virtual ports.
878 */ 855 */
879void 856void
880bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 857bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
881 u16 vf_id, struct bfa_port_cfg_s *port_cfg, 858 uint16_t vf_id, struct bfa_fcs_vport_s *vport)
882 struct bfa_fcs_vport_s *vport)
883{ 859{
884 lport->fcs = fcs; 860 lport->fcs = fcs;
885 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); 861 lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
886 bfa_os_assign(lport->port_cfg, *port_cfg);
887 lport->vport = vport; 862 lport->vport = vport;
888 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : 863 lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
889 bfa_lps_get_tag(lport->fabric->lps); 864 bfa_lps_get_tag(lport->fabric->lps);
890 865
891 INIT_LIST_HEAD(&lport->rport_q); 866 INIT_LIST_HEAD(&lport->rport_q);
892 lport->num_rports = 0; 867 lport->num_rports = 0;
868}
869
870/**
871 * Logical port initialization of base or virtual port.
872 * Called by fabric for base port or by vport for virtual ports.
873 */
893 874
894 lport->bfad_port = 875void
895 bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles, 876bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
877 struct bfa_port_cfg_s *port_cfg)
878{
879 struct bfa_fcs_vport_s *vport = lport->vport;
880
881 bfa_os_assign(lport->port_cfg, *port_cfg);
882
883 lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport,
884 lport->port_cfg.roles,
896 lport->fabric->vf_drv, 885 lport->fabric->vf_drv,
897 vport ? vport->vport_drv : NULL); 886 vport ? vport->vport_drv : NULL);
887
898 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); 888 bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);
899 889
900 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); 890 bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
901 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); 891 bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
902} 892}
903 893
904
905
906/** 894/**
907 * fcs_lport_api 895 * fcs_lport_api
908 */ 896 */
@@ -921,13 +909,20 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
921 if (port->fabric) { 909 if (port->fabric) {
922 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); 910 port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
923 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); 911 port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
912 port_attr->authfail =
913 bfa_fcs_fabric_is_auth_failed(port->fabric);
924 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); 914 port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
925 memcpy(port_attr->fabric_ip_addr, 915 memcpy(port_attr->fabric_ip_addr,
926 bfa_fcs_port_get_fabric_ipaddr(port), 916 bfa_fcs_port_get_fabric_ipaddr(port),
927 BFA_FCS_FABRIC_IPADDR_SZ); 917 BFA_FCS_FABRIC_IPADDR_SZ);
928 918
929 if (port->vport != NULL) 919 if (port->vport != NULL) {
930 port_attr->port_type = BFA_PPORT_TYPE_VPORT; 920 port_attr->port_type = BFA_PPORT_TYPE_VPORT;
921 port_attr->fpma_mac =
922 bfa_lps_get_lp_mac(port->vport->lps);
923 } else
924 port_attr->fpma_mac =
925 bfa_lps_get_lp_mac(port->fabric->lps);
931 926
932 } else { 927 } else {
933 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; 928 port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
index 9c4b24e62de1..3c27788cd527 100644
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -55,14 +55,7 @@ bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
55} 55}
56 56
57void 57void
58bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs) 58bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
59{ 59{
60 bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, 60 bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
61 fcs);
62}
63
64void
65bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
66{
67 bfa_fcs_modexit_comp(fcs);
68} 61}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
index ad01db6444b2..3d57d48bbae4 100644
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -93,13 +93,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
93} 93}
94 94
95void 95void
96bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs) 96bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
97{ 97{
98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); 98 bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
99} 99}
100
101void
102bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
103{
104 bfa_fcs_modexit_comp(fcs);
105}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index ede1438619e2..871a4e28575c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -53,6 +53,18 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
53} 53}
54 54
55void 55void
56bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
57{
58}
59
60static void
61bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
62{
63 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
64 __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
65}
66
67void
56bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) 68bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
57{ 69{
58} 70}
@@ -136,6 +148,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
136void 148void
137bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) 149bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
138{ 150{
151 bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
139 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; 152 bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
140} 153}
141 154
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 51ae5740e6e9..76ceb9a4bf2f 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -85,6 +85,15 @@ bfa_hwct_reginit(struct bfa_s *bfa)
85} 85}
86 86
87void 87void
88bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
89{
90 u32 r32;
91
92 r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
93 bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
94}
95
96void
88bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) 97bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
89{ 98{
90 u32 r32; 99 u32 r32;
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index b36540e4ed76..0eba3f930d5b 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -15,7 +15,7 @@
15 * General Public License for more details. 15 * General Public License for more details.
16 */ 16 */
17#include <bfa.h> 17#include <bfa.h>
18#include <bfi/bfi_cbreg.h> 18#include <bfi/bfi_ctreg.h>
19#include <bfa_port_priv.h> 19#include <bfa_port_priv.h>
20#include <bfa_intr_priv.h> 20#include <bfa_intr_priv.h>
21#include <cs/bfa_debug.h> 21#include <cs/bfa_debug.h>
@@ -34,6 +34,26 @@ bfa_msix_lpu(struct bfa_s *bfa)
34 bfa_ioc_mbox_isr(&bfa->ioc); 34 bfa_ioc_mbox_isr(&bfa->ioc);
35} 35}
36 36
37static void
38bfa_reqq_resume(struct bfa_s *bfa, int qid)
39{
40 struct list_head *waitq, *qe, *qen;
41 struct bfa_reqq_wait_s *wqe;
42
43 waitq = bfa_reqq(bfa, qid);
44 list_for_each_safe(qe, qen, waitq) {
45 /**
46 * Callback only as long as there is room in request queue
47 */
48 if (bfa_reqq_full(bfa, qid))
49 break;
50
51 list_del(qe);
52 wqe = (struct bfa_reqq_wait_s *) qe;
53 wqe->qresume(wqe->cbarg);
54 }
55}
56
37void 57void
38bfa_msix_all(struct bfa_s *bfa, int vec) 58bfa_msix_all(struct bfa_s *bfa, int vec)
39{ 59{
@@ -96,7 +116,8 @@ bfa_isr_enable(struct bfa_s *bfa)
96 116
97 bfa_msix_install(bfa); 117 bfa_msix_install(bfa);
98 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | 118 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
99 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); 119 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
120 __HFN_INT_LL_HALT);
100 121
101 if (pci_func == 0) 122 if (pci_func == 0)
102 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | 123 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
@@ -127,23 +148,18 @@ bfa_isr_disable(struct bfa_s *bfa)
127void 148void
128bfa_msix_reqq(struct bfa_s *bfa, int qid) 149bfa_msix_reqq(struct bfa_s *bfa, int qid)
129{ 150{
130 struct list_head *waitq, *qe, *qen; 151 struct list_head *waitq;
131 struct bfa_reqq_wait_s *wqe;
132 152
133 qid &= (BFI_IOC_MAX_CQS - 1); 153 qid &= (BFI_IOC_MAX_CQS - 1);
134 154
135 waitq = bfa_reqq(bfa, qid); 155 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
136 list_for_each_safe(qe, qen, waitq) {
137 /**
138 * Callback only as long as there is room in request queue
139 */
140 if (bfa_reqq_full(bfa, qid))
141 break;
142 156
143 list_del(qe); 157 /**
144 wqe = (struct bfa_reqq_wait_s *) qe; 158 * Resume any pending requests in the corresponding reqq.
145 wqe->qresume(wqe->cbarg); 159 */
146 } 160 waitq = bfa_reqq(bfa, qid);
161 if (!list_empty(waitq))
162 bfa_reqq_resume(bfa, qid);
147} 163}
148 164
149void 165void
@@ -157,26 +173,27 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
157} 173}
158 174
159void 175void
160bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) 176bfa_msix_rspq(struct bfa_s *bfa, int qid)
161{ 177{
162 struct bfi_msg_s *m; 178 struct bfi_msg_s *m;
163 u32 pi, ci; 179 u32 pi, ci;
180 struct list_head *waitq;
164 181
165 bfa_trc_fp(bfa, rsp_qid); 182 bfa_trc_fp(bfa, qid);
166 183
167 rsp_qid &= (BFI_IOC_MAX_CQS - 1); 184 qid &= (BFI_IOC_MAX_CQS - 1);
168 185
169 bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid); 186 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
170 187
171 ci = bfa_rspq_ci(bfa, rsp_qid); 188 ci = bfa_rspq_ci(bfa, qid);
172 pi = bfa_rspq_pi(bfa, rsp_qid); 189 pi = bfa_rspq_pi(bfa, qid);
173 190
174 bfa_trc_fp(bfa, ci); 191 bfa_trc_fp(bfa, ci);
175 bfa_trc_fp(bfa, pi); 192 bfa_trc_fp(bfa, pi);
176 193
177 if (bfa->rme_process) { 194 if (bfa->rme_process) {
178 while (ci != pi) { 195 while (ci != pi) {
179 m = bfa_rspq_elem(bfa, rsp_qid, ci); 196 m = bfa_rspq_elem(bfa, qid, ci);
180 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); 197 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
181 198
182 bfa_isrs[m->mhdr.msg_class] (bfa, m); 199 bfa_isrs[m->mhdr.msg_class] (bfa, m);
@@ -188,25 +205,59 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
188 /** 205 /**
189 * update CI 206 * update CI
190 */ 207 */
191 bfa_rspq_ci(bfa, rsp_qid) = pi; 208 bfa_rspq_ci(bfa, qid) = pi;
192 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi); 209 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
193 bfa_os_mmiowb(); 210 bfa_os_mmiowb();
211
212 /**
213 * Resume any pending requests in the corresponding reqq.
214 */
215 waitq = bfa_reqq(bfa, qid);
216 if (!list_empty(waitq))
217 bfa_reqq_resume(bfa, qid);
194} 218}
195 219
196void 220void
197bfa_msix_lpu_err(struct bfa_s *bfa, int vec) 221bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
198{ 222{
199 u32 intr; 223 u32 intr, curr_value;
200 224
201 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); 225 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
202 226
203 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) 227 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
204 bfa_msix_lpu(bfa); 228 bfa_msix_lpu(bfa);
205 229
206 if (intr & (__HFN_INT_ERR_EMC | 230 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
207 __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | 231 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
208 __HFN_INT_ERR_PSS)) 232
233 if (intr) {
234 if (intr & __HFN_INT_LL_HALT) {
235 /**
236 * If LL_HALT bit is set then FW Init Halt LL Port
237 * Register needs to be cleared as well so Interrupt
238 * Status Register will be cleared.
239 */
240 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
241 curr_value &= ~__FW_INIT_HALT_P;
242 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
243 }
244
245 if (intr & __HFN_INT_ERR_PSS) {
246 /**
247 * ERR_PSS bit needs to be cleared as well in case
248 * interrups are shared so driver's interrupt handler is
249 * still called eventhough it is already masked out.
250 */
251 curr_value = bfa_reg_read(
252 bfa->ioc.ioc_regs.pss_err_status_reg);
253 curr_value &= __PSS_ERR_STATUS_SET;
254 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
255 curr_value);
256 }
257
258 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
209 bfa_msix_errint(bfa, intr); 259 bfa_msix_errint(bfa, intr);
260 }
210} 261}
211 262
212void 263void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 397d7e9eade5..e038bc9769f6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -18,7 +18,7 @@
18#include <bfa.h> 18#include <bfa.h>
19#include <bfa_ioc.h> 19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h> 20#include <bfa_fwimg_priv.h>
21#include <bfa_trcmod_priv.h> 21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h> 22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h> 23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h> 24#include <bfi/bfi_ctreg.h>
@@ -27,18 +27,17 @@
27#include <log/bfa_log_hal.h> 27#include <log/bfa_log_hal.h>
28#include <defs/bfa_defs_pci.h> 28#include <defs/bfa_defs_pci.h>
29 29
30BFA_TRC_FILE(HAL, IOC); 30BFA_TRC_FILE(CNA, IOC);
31 31
32/** 32/**
33 * IOC local definitions 33 * IOC local definitions
34 */ 34 */
35#define BFA_IOC_TOV 2000 /* msecs */ 35#define BFA_IOC_TOV 2000 /* msecs */
36#define BFA_IOC_HB_TOV 1000 /* msecs */ 36#define BFA_IOC_HWSEM_TOV 500 /* msecs */
37#define BFA_IOC_HB_FAIL_MAX 4 37#define BFA_IOC_HB_TOV 500 /* msecs */
38#define BFA_IOC_HWINIT_MAX 2 38#define BFA_IOC_HWINIT_MAX 2
39#define BFA_IOC_FWIMG_MINSZ (16 * 1024) 39#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
40#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \ 40#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
41 + BFA_IOC_TOV)
42 41
43#define bfa_ioc_timer_start(__ioc) \ 42#define bfa_ioc_timer_start(__ioc) \
44 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ 43 bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
@@ -51,12 +50,25 @@ BFA_TRC_FILE(HAL, IOC);
51 (sizeof(struct bfa_trc_mod_s) - \ 50 (sizeof(struct bfa_trc_mod_s) - \
52 BFA_TRC_MAX * sizeof(struct bfa_trc_s))) 51 BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
53#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) 52#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
54#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
55 53
56#define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) 54/**
57#define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) 55 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
58#define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) 56 */
59bfa_boolean_t bfa_auto_recover = BFA_FALSE; 57
58#define bfa_ioc_firmware_lock(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
60#define bfa_ioc_firmware_unlock(__ioc) \
61 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
62#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \
63 ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
64#define bfa_ioc_fwimg_get_size(__ioc) \
65 ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
66#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
67#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
68#define bfa_ioc_notify_hbfail(__ioc) \
69 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
70
71bfa_boolean_t bfa_auto_recover = BFA_TRUE;
60 72
61/* 73/*
62 * forward declarations 74 * forward declarations
@@ -64,7 +76,6 @@ bfa_boolean_t bfa_auto_recover = BFA_FALSE;
64static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, 76static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
65 enum bfa_ioc_aen_event event); 77 enum bfa_ioc_aen_event event);
66static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); 78static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
67static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
68static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); 79static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
69static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); 80static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
70static void bfa_ioc_timeout(void *ioc); 81static void bfa_ioc_timeout(void *ioc);
@@ -77,8 +88,6 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
77static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 88static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
78static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); 89static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
79static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 90static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
80static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
81static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
82static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 91static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
83static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); 92static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
84 93
@@ -508,14 +517,19 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
508 bfa_trc(ioc, event); 517 bfa_trc(ioc, event);
509 518
510 switch (event) { 519 switch (event) {
511 case IOC_E_HWERROR:
512 case IOC_E_FWRSP_DISABLE: 520 case IOC_E_FWRSP_DISABLE:
513 bfa_ioc_timer_stop(ioc); 521 bfa_ioc_timer_stop(ioc);
522 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
523 break;
524
525 case IOC_E_HWERROR:
526 bfa_ioc_timer_stop(ioc);
514 /* 527 /*
515 * !!! fall through !!! 528 * !!! fall through !!!
516 */ 529 */
517 530
518 case IOC_E_TIMEOUT: 531 case IOC_E_TIMEOUT:
532 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
519 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 533 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
520 break; 534 break;
521 535
@@ -608,15 +622,12 @@ bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
608 * Mark IOC as failed in hardware and stop firmware. 622 * Mark IOC as failed in hardware and stop firmware.
609 */ 623 */
610 bfa_ioc_lpu_stop(ioc); 624 bfa_ioc_lpu_stop(ioc);
611 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL); 625 bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
612 626
613 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { 627 /**
614 bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); 628 * Notify other functions on HB failure.
615 /* 629 */
616 * Wait for halt to take effect 630 bfa_ioc_notify_hbfail(ioc);
617 */
618 bfa_reg_read(ioc->ioc_regs.ll_halt);
619 }
620 631
621 /** 632 /**
622 * Notify driver and common modules registered for notification. 633 * Notify driver and common modules registered for notification.
@@ -672,6 +683,12 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
672 */ 683 */
673 break; 684 break;
674 685
686 case IOC_E_HWERROR:
687 /*
688 * HB failure notification, ignore.
689 */
690 break;
691
675 default: 692 default:
676 bfa_sm_fault(ioc, event); 693 bfa_sm_fault(ioc, event);
677 } 694 }
@@ -700,7 +717,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
700 } 717 }
701} 718}
702 719
703static void 720void
704bfa_ioc_sem_timeout(void *ioc_arg) 721bfa_ioc_sem_timeout(void *ioc_arg)
705{ 722{
706 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; 723 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
@@ -708,26 +725,32 @@ bfa_ioc_sem_timeout(void *ioc_arg)
708 bfa_ioc_hw_sem_get(ioc); 725 bfa_ioc_hw_sem_get(ioc);
709} 726}
710 727
711static void 728bfa_boolean_t
712bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc) 729bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
713{ 730{
714 u32 r32; 731 u32 r32;
715 int cnt = 0; 732 int cnt = 0;
716#define BFA_SEM_SPINCNT 1000 733#define BFA_SEM_SPINCNT 3000
717 734
718 do { 735 r32 = bfa_reg_read(sem_reg);
719 r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg); 736
737 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
720 cnt++; 738 cnt++;
721 if (cnt > BFA_SEM_SPINCNT) 739 bfa_os_udelay(2);
722 break; 740 r32 = bfa_reg_read(sem_reg);
723 } while (r32 != 0); 741 }
742
743 if (r32 == 0)
744 return BFA_TRUE;
745
724 bfa_assert(cnt < BFA_SEM_SPINCNT); 746 bfa_assert(cnt < BFA_SEM_SPINCNT);
747 return BFA_FALSE;
725} 748}
726 749
727static void 750void
728bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc) 751bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
729{ 752{
730 bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1); 753 bfa_reg_write(sem_reg, 1);
731} 754}
732 755
733static void 756static void
@@ -737,7 +760,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
737 760
738 /** 761 /**
739 * First read to the semaphore register will return 0, subsequent reads 762 * First read to the semaphore register will return 0, subsequent reads
740 * will return 1. Semaphore is released by writing 0 to the register 763 * will return 1. Semaphore is released by writing 1 to the register
741 */ 764 */
742 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); 765 r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
743 if (r32 == 0) { 766 if (r32 == 0) {
@@ -746,10 +769,10 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
746 } 769 }
747 770
748 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, 771 bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
749 ioc, BFA_IOC_TOV); 772 ioc, BFA_IOC_HWSEM_TOV);
750} 773}
751 774
752static void 775void
753bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) 776bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
754{ 777{
755 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); 778 bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
@@ -828,7 +851,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
828/** 851/**
829 * Get driver and firmware versions. 852 * Get driver and firmware versions.
830 */ 853 */
831static void 854void
832bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 855bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
833{ 856{
834 u32 pgnum, pgoff; 857 u32 pgnum, pgoff;
@@ -847,24 +870,10 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
847 } 870 }
848} 871}
849 872
850static u32 *
851bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
852{
853 if (ioc->ctdev)
854 return bfi_image_ct_get_chunk(off);
855 return bfi_image_cb_get_chunk(off);
856}
857
858static u32
859bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
860{
861return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
862}
863
864/** 873/**
865 * Returns TRUE if same. 874 * Returns TRUE if same.
866 */ 875 */
867static bfa_boolean_t 876bfa_boolean_t
868bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) 877bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
869{ 878{
870 struct bfi_ioc_image_hdr_s *drv_fwhdr; 879 struct bfi_ioc_image_hdr_s *drv_fwhdr;
@@ -921,95 +930,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
921} 930}
922 931
923/** 932/**
924 * Return true if firmware of current driver matches the running firmware.
925 */
926static bfa_boolean_t
927bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
928{
929 enum bfi_ioc_state ioc_fwstate;
930 u32 usecnt;
931 struct bfi_ioc_image_hdr_s fwhdr;
932
933 /**
934 * Firmware match check is relevant only for CNA.
935 */
936 if (!ioc->cna)
937 return BFA_TRUE;
938
939 /**
940 * If bios boot (flash based) -- do not increment usage count
941 */
942 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
943 return BFA_TRUE;
944
945 bfa_ioc_usage_sem_get(ioc);
946 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
947
948 /**
949 * If usage count is 0, always return TRUE.
950 */
951 if (usecnt == 0) {
952 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
953 bfa_ioc_usage_sem_release(ioc);
954 bfa_trc(ioc, usecnt);
955 return BFA_TRUE;
956 }
957
958 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
959 bfa_trc(ioc, ioc_fwstate);
960
961 /**
962 * Use count cannot be non-zero and chip in uninitialized state.
963 */
964 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
965
966 /**
967 * Check if another driver with a different firmware is active
968 */
969 bfa_ioc_fwver_get(ioc, &fwhdr);
970 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
971 bfa_ioc_usage_sem_release(ioc);
972 bfa_trc(ioc, usecnt);
973 return BFA_FALSE;
974 }
975
976 /**
977 * Same firmware version. Increment the reference count.
978 */
979 usecnt++;
980 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
981 bfa_ioc_usage_sem_release(ioc);
982 bfa_trc(ioc, usecnt);
983 return BFA_TRUE;
984}
985
986static void
987bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
988{
989 u32 usecnt;
990
991 /**
992 * Firmware lock is relevant only for CNA.
993 * If bios boot (flash based) -- do not decrement usage count
994 */
995 if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
996 return;
997
998 /**
999 * decrement usage count
1000 */
1001 bfa_ioc_usage_sem_get(ioc);
1002 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
1003 bfa_assert(usecnt > 0);
1004
1005 usecnt--;
1006 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
1007 bfa_trc(ioc, usecnt);
1008
1009 bfa_ioc_usage_sem_release(ioc);
1010}
1011
1012/**
1013 * Conditionally flush any pending message from firmware at start. 933 * Conditionally flush any pending message from firmware at start.
1014 */ 934 */
1015static void 935static void
@@ -1152,33 +1072,27 @@ bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1152static void 1072static void
1153bfa_ioc_hb_check(void *cbarg) 1073bfa_ioc_hb_check(void *cbarg)
1154{ 1074{
1155 struct bfa_ioc_s *ioc = cbarg; 1075 struct bfa_ioc_s *ioc = cbarg;
1156 u32 hb_count; 1076 u32 hb_count;
1157 1077
1158 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1078 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1159 if (ioc->hb_count == hb_count) { 1079 if (ioc->hb_count == hb_count) {
1160 ioc->hb_fail++; 1080 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
1161 } else { 1081 hb_count);
1162 ioc->hb_count = hb_count;
1163 ioc->hb_fail = 0;
1164 }
1165
1166 if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
1167 bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
1168 ioc->hb_fail = 0;
1169 bfa_ioc_recover(ioc); 1082 bfa_ioc_recover(ioc);
1170 return; 1083 return;
1084 } else {
1085 ioc->hb_count = hb_count;
1171 } 1086 }
1172 1087
1173 bfa_ioc_mbox_poll(ioc); 1088 bfa_ioc_mbox_poll(ioc);
1174 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1089 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
1175 BFA_IOC_HB_TOV); 1090 ioc, BFA_IOC_HB_TOV);
1176} 1091}
1177 1092
1178static void 1093static void
1179bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) 1094bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1180{ 1095{
1181 ioc->hb_fail = 0;
1182 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); 1096 ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
1183 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, 1097 bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
1184 BFA_IOC_HB_TOV); 1098 BFA_IOC_HB_TOV);
@@ -1191,112 +1105,6 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
1191} 1105}
1192 1106
1193/** 1107/**
1194 * Host to LPU mailbox message addresses
1195 */
1196static struct {
1197 u32 hfn_mbox, lpu_mbox, hfn_pgn;
1198} iocreg_fnreg[] = {
1199 {
1200 HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, {
1201 HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, {
1202 HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, {
1203 HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
1204};
1205
1206/**
1207 * Host <-> LPU mailbox command/status registers - port 0
1208 */
1209static struct {
1210 u32 hfn, lpu;
1211} iocreg_mbcmd_p0[] = {
1212 {
1213 HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, {
1214 HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, {
1215 HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, {
1216 HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
1217};
1218
1219/**
1220 * Host <-> LPU mailbox command/status registers - port 1
1221 */
1222static struct {
1223 u32 hfn, lpu;
1224} iocreg_mbcmd_p1[] = {
1225 {
1226 HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, {
1227 HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, {
1228 HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, {
1229 HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
1230};
1231
1232/**
1233 * Shared IRQ handling in INTX mode
1234 */
1235static struct {
1236 u32 isr, msk;
1237} iocreg_shirq_next[] = {
1238 {
1239 HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, {
1240 HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, {
1241 HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, {
1242HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},};
1243
1244static void
1245bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
1246{
1247 bfa_os_addr_t rb;
1248 int pcifn = bfa_ioc_pcifn(ioc);
1249
1250 rb = bfa_ioc_bar0(ioc);
1251
1252 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
1253 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
1254 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
1255
1256 if (ioc->port_id == 0) {
1257 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
1258 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
1259 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
1260 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
1261 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
1262 } else {
1263 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
1264 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
1265 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
1266 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
1267 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
1268 }
1269
1270 /**
1271 * Shared IRQ handling in INTX mode
1272 */
1273 ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
1274 ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;
1275
1276 /*
1277 * PSS control registers
1278 */
1279 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
1280 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
1281 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
1282
1283 /*
1284 * IOC semaphore registers and serialization
1285 */
1286 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
1287 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
1288 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
1289
1290 /**
1291 * sram memory access
1292 */
1293 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
1294 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
1295 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
1296 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
1297}
1298
1299/**
1300 * Initiate a full firmware download. 1108 * Initiate a full firmware download.
1301 */ 1109 */
1302static void 1110static void
@@ -1321,9 +1129,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1321 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) 1129 if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
1322 boot_type = BFI_BOOT_TYPE_FLASH; 1130 boot_type = BFI_BOOT_TYPE_FLASH;
1323 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); 1131 fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
1324 fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
1325 fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
1326 bfa_os_swap32(boot_param);
1327 1132
1328 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1133 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1329 pgoff = bfa_ioc_smem_pgoff(ioc, loff); 1134 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -1332,17 +1137,17 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1332 1137
1333 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { 1138 for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
1334 1139
1335 if (BFA_FLASH_CHUNK_NO(i) != chunkno) { 1140 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1336 chunkno = BFA_FLASH_CHUNK_NO(i); 1141 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1337 fwimg = bfa_ioc_fwimg_get_chunk(ioc, 1142 fwimg = bfa_ioc_fwimg_get_chunk(ioc,
1338 BFA_FLASH_CHUNK_ADDR(chunkno)); 1143 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1339 } 1144 }
1340 1145
1341 /** 1146 /**
1342 * write smem 1147 * write smem
1343 */ 1148 */
1344 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 1149 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1345 fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]); 1150 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1346 1151
1347 loff += sizeof(u32); 1152 loff += sizeof(u32);
1348 1153
@@ -1358,6 +1163,14 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1358 1163
1359 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 1164 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
1360 bfa_ioc_smem_pgnum(ioc, 0)); 1165 bfa_ioc_smem_pgnum(ioc, 0));
1166
1167 /*
1168 * Set boot type and boot param at the end.
1169 */
1170 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1171 bfa_os_swap32(boot_type));
1172 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
1173 bfa_os_swap32(boot_param));
1361} 1174}
1362 1175
1363static void 1176static void
@@ -1440,168 +1253,10 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1440} 1253}
1441 1254
1442/** 1255/**
1443 * Initialize IOC to port mapping.
1444 */
1445
1446#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
1447static void
1448bfa_ioc_map_port(struct bfa_ioc_s *ioc)
1449{
1450 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1451 u32 r32;
1452
1453 /**
1454 * For crossbow, port id is same as pci function.
1455 */
1456 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
1457 ioc->port_id = bfa_ioc_pcifn(ioc);
1458 return;
1459 }
1460
1461 /**
1462 * For catapult, base port id on personality register and IOC type
1463 */
1464 r32 = bfa_reg_read(rb + FNC_PERS_REG);
1465 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
1466 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
1467
1468 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
1469 bfa_trc(ioc, ioc->port_id);
1470}
1471
1472
1473
1474/**
1475 * bfa_ioc_public 1256 * bfa_ioc_public
1476 */ 1257 */
1477 1258
1478/** 1259/**
1479* Set interrupt mode for a function: INTX or MSIX
1480 */
1481void
1482bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
1483{
1484 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1485 u32 r32, mode;
1486
1487 r32 = bfa_reg_read(rb + FNC_PERS_REG);
1488 bfa_trc(ioc, r32);
1489
1490 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
1491 __F0_INTX_STATUS;
1492
1493 /**
1494 * If already in desired mode, do not change anything
1495 */
1496 if (!msix && mode)
1497 return;
1498
1499 if (msix)
1500 mode = __F0_INTX_STATUS_MSIX;
1501 else
1502 mode = __F0_INTX_STATUS_INTA;
1503
1504 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
1505 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
1506 bfa_trc(ioc, r32);
1507
1508 bfa_reg_write(rb + FNC_PERS_REG, r32);
1509}
1510
1511bfa_status_t
1512bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1513{
1514 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
1515 u32 pll_sclk, pll_fclk, r32;
1516
1517 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
1518 pll_sclk =
1519 __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
1520 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
1521 __APP_PLL_312_JITLMT0_1(3U) |
1522 __APP_PLL_312_CNTLMT0_1(1U);
1523 pll_fclk =
1524 __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
1525 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
1526 __APP_PLL_425_JITLMT0_1(3U) |
1527 __APP_PLL_425_CNTLMT0_1(1U);
1528
1529 /**
1530 * For catapult, choose operational mode FC/FCoE
1531 */
1532 if (ioc->fcmode) {
1533 bfa_reg_write((rb + OP_MODE), 0);
1534 bfa_reg_write((rb + ETH_MAC_SER_REG),
1535 __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
1536 | __APP_EMS_CHANNEL_SEL);
1537 } else {
1538 ioc->pllinit = BFA_TRUE;
1539 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
1540 bfa_reg_write((rb + ETH_MAC_SER_REG),
1541 __APP_EMS_REFCKBUFEN1);
1542 }
1543 } else {
1544 pll_sclk =
1545 __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
1546 __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
1547 __APP_PLL_312_CNTLMT0_1(3U);
1548 pll_fclk =
1549 __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
1550 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
1551 __APP_PLL_425_JITLMT0_1(3U) |
1552 __APP_PLL_425_CNTLMT0_1(3U);
1553 }
1554
1555 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
1556 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
1557
1558 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
1559 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
1560 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
1561 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
1562 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
1563 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
1564
1565 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1566 __APP_PLL_312_LOGIC_SOFT_RESET);
1567 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1568 __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
1569 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1570 __APP_PLL_425_LOGIC_SOFT_RESET);
1571 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1572 __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
1573 bfa_os_udelay(2);
1574 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1575 __APP_PLL_312_LOGIC_SOFT_RESET);
1576 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1577 __APP_PLL_425_LOGIC_SOFT_RESET);
1578
1579 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
1580 pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
1581 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
1582 pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);
1583
1584 /**
1585 * Wait for PLLs to lock.
1586 */
1587 bfa_os_udelay(2000);
1588 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
1589 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
1590
1591 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
1592 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
1593
1594 if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
1595 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
1596 bfa_os_udelay(1000);
1597 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
1598 bfa_trc(ioc, r32);
1599 }
1600
1601 return BFA_STATUS_OK;
1602}
1603
1604/**
1605 * Interface used by diag module to do firmware boot with memory test 1260 * Interface used by diag module to do firmware boot with memory test
1606 * as the entry vector. 1261 * as the entry vector.
1607 */ 1262 */
@@ -1642,7 +1297,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
1642void 1297void
1643bfa_ioc_auto_recover(bfa_boolean_t auto_recover) 1298bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1644{ 1299{
1645 bfa_auto_recover = BFA_FALSE; 1300 bfa_auto_recover = auto_recover;
1646} 1301}
1647 1302
1648 1303
@@ -1764,6 +1419,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1764 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); 1419 ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
1765 ioc->cna = ioc->ctdev && !ioc->fcmode; 1420 ioc->cna = ioc->ctdev && !ioc->fcmode;
1766 1421
1422 /**
1423 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
1424 */
1425 if (ioc->ctdev)
1426 bfa_ioc_set_ct_hwif(ioc);
1427 else
1428 bfa_ioc_set_cb_hwif(ioc);
1429
1767 bfa_ioc_map_port(ioc); 1430 bfa_ioc_map_port(ioc);
1768 bfa_ioc_reg_init(ioc); 1431 bfa_ioc_reg_init(ioc);
1769} 1432}
@@ -1830,7 +1493,6 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
1830void 1493void
1831bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) 1494bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1832{ 1495{
1833 bfa_assert(ioc->auto_recover);
1834 ioc->dbg_fwsave = dbg_fwsave; 1496 ioc->dbg_fwsave = dbg_fwsave;
1835 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); 1497 ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
1836} 1498}
@@ -1973,7 +1635,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1973 ((__sm) == BFI_IOC_INITING) || \ 1635 ((__sm) == BFI_IOC_INITING) || \
1974 ((__sm) == BFI_IOC_HWINIT) || \ 1636 ((__sm) == BFI_IOC_HWINIT) || \
1975 ((__sm) == BFI_IOC_DISABLED) || \ 1637 ((__sm) == BFI_IOC_DISABLED) || \
1976 ((__sm) == BFI_IOC_HBFAIL) || \ 1638 ((__sm) == BFI_IOC_FAIL) || \
1977 ((__sm) == BFI_IOC_CFG_DISABLED)) 1639 ((__sm) == BFI_IOC_CFG_DISABLED))
1978 1640
1979/** 1641/**
@@ -2017,46 +1679,28 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2017 struct bfa_adapter_attr_s *ad_attr) 1679 struct bfa_adapter_attr_s *ad_attr)
2018{ 1680{
2019 struct bfi_ioc_attr_s *ioc_attr; 1681 struct bfi_ioc_attr_s *ioc_attr;
2020 char model[BFA_ADAPTER_MODEL_NAME_LEN];
2021 1682
2022 ioc_attr = ioc->attr; 1683 ioc_attr = ioc->attr;
2023 bfa_os_memcpy((void *)&ad_attr->serial_num, 1684
2024 (void *)ioc_attr->brcd_serialnum, 1685 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2025 BFA_ADAPTER_SERIAL_NUM_LEN); 1686 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2026 1687 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2027 bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN); 1688 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2028 bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
2029 BFA_VERSION_LEN);
2030 bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
2031 BFA_ADAPTER_MFG_NAME_LEN);
2032 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, 1689 bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2033 sizeof(struct bfa_mfg_vpd_s)); 1690 sizeof(struct bfa_mfg_vpd_s));
2034 1691
2035 ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop); 1692 ad_attr->nports = bfa_ioc_get_nports(ioc);
2036 ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); 1693 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2037 1694
2038 /** 1695 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2039 * model name 1696 /* For now, model descr uses same model string */
2040 */ 1697 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2041 if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
2042 strcpy(model, "BR-10?0");
2043 model[5] = '0' + ad_attr->nports;
2044 } else {
2045 strcpy(model, "Brocade-??5");
2046 model[8] =
2047 '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2048 model[9] = '0' + ad_attr->nports;
2049 }
2050 1698
2051 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) 1699 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2052 ad_attr->prototype = 1; 1700 ad_attr->prototype = 1;
2053 else 1701 else
2054 ad_attr->prototype = 0; 1702 ad_attr->prototype = 0;
2055 1703
2056 bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
2057 bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
2058 BFA_ADAPTER_MODEL_NAME_LEN);
2059
2060 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 1704 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2061 ad_attr->mac = bfa_ioc_get_mac(ioc); 1705 ad_attr->mac = bfa_ioc_get_mac(ioc);
2062 1706
@@ -2064,41 +1708,122 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2064 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; 1708 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2065 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; 1709 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2066 ad_attr->asic_rev = ioc_attr->asic_rev; 1710 ad_attr->asic_rev = ioc_attr->asic_rev;
2067 ad_attr->hw_ver[0] = 'R'; 1711
2068 ad_attr->hw_ver[1] = 'e'; 1712 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2069 ad_attr->hw_ver[2] = 'v';
2070 ad_attr->hw_ver[3] = '-';
2071 ad_attr->hw_ver[4] = ioc_attr->asic_rev;
2072 ad_attr->hw_ver[5] = '\0';
2073 1713
2074 ad_attr->cna_capable = ioc->cna; 1714 ad_attr->cna_capable = ioc->cna;
2075} 1715}
2076 1716
1717enum bfa_ioc_type_e
1718bfa_ioc_get_type(struct bfa_ioc_s *ioc)
1719{
1720 if (!ioc->ctdev || ioc->fcmode)
1721 return BFA_IOC_TYPE_FC;
1722 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1723 return BFA_IOC_TYPE_FCoE;
1724 else if (ioc->ioc_mc == BFI_MC_LL)
1725 return BFA_IOC_TYPE_LL;
1726 else {
1727 bfa_assert(ioc->ioc_mc == BFI_MC_LL);
1728 return BFA_IOC_TYPE_LL;
1729 }
1730}
1731
1732void
1733bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
1734{
1735 bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1736 bfa_os_memcpy((void *)serial_num,
1737 (void *)ioc->attr->brcd_serialnum,
1738 BFA_ADAPTER_SERIAL_NUM_LEN);
1739}
1740
1741void
1742bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
1743{
1744 bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
1745 bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1746}
1747
1748void
1749bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
1750{
1751 bfa_assert(chip_rev);
1752
1753 bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1754
1755 chip_rev[0] = 'R';
1756 chip_rev[1] = 'e';
1757 chip_rev[2] = 'v';
1758 chip_rev[3] = '-';
1759 chip_rev[4] = ioc->attr->asic_rev;
1760 chip_rev[5] = '\0';
1761}
1762
1763void
1764bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
1765{
1766 bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
1767 bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
1768 BFA_VERSION_LEN);
1769}
1770
1771void
1772bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
1773{
1774 bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1775 bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1776}
1777
1778void
1779bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
1780{
1781 struct bfi_ioc_attr_s *ioc_attr;
1782 u8 nports;
1783 u8 max_speed;
1784
1785 bfa_assert(model);
1786 bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1787
1788 ioc_attr = ioc->attr;
1789
1790 nports = bfa_ioc_get_nports(ioc);
1791 max_speed = bfa_ioc_speed_sup(ioc);
1792
1793 /**
1794 * model name
1795 */
1796 if (max_speed == 10) {
1797 strcpy(model, "BR-10?0");
1798 model[5] = '0' + nports;
1799 } else {
1800 strcpy(model, "Brocade-??5");
1801 model[8] = '0' + max_speed;
1802 model[9] = '0' + nports;
1803 }
1804}
1805
1806enum bfa_ioc_state
1807bfa_ioc_get_state(struct bfa_ioc_s *ioc)
1808{
1809 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1810}
1811
2077void 1812void
2078bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) 1813bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2079{ 1814{
2080 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); 1815 bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2081 1816
2082 ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1817 ioc_attr->state = bfa_ioc_get_state(ioc);
2083 ioc_attr->port_id = ioc->port_id; 1818 ioc_attr->port_id = ioc->port_id;
2084 1819
2085 if (!ioc->ctdev) 1820 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2086 ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
2087 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2088 ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
2089 else if (ioc->ioc_mc == BFI_MC_LL)
2090 ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
2091 1821
2092 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); 1822 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2093 1823
2094 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; 1824 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2095 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; 1825 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2096 ioc_attr->pci_attr.chip_rev[0] = 'R'; 1826 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2097 ioc_attr->pci_attr.chip_rev[1] = 'e';
2098 ioc_attr->pci_attr.chip_rev[2] = 'v';
2099 ioc_attr->pci_attr.chip_rev[3] = '-';
2100 ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
2101 ioc_attr->pci_attr.chip_rev[5] = '\0';
2102} 1827}
2103 1828
2104/** 1829/**
@@ -2195,29 +1920,6 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2195} 1920}
2196 1921
2197/** 1922/**
2198 * Return true if interrupt should be claimed.
2199 */
2200bfa_boolean_t
2201bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
2202{
2203 u32 isr, msk;
2204
2205 /**
2206 * Always claim if not catapult.
2207 */
2208 if (!ioc->ctdev)
2209 return BFA_TRUE;
2210
2211 /**
2212 * FALSE if next device is claiming interrupt.
2213 * TRUE if next device is not interrupting or not present.
2214 */
2215 msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
2216 isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
2217 return !(isr & ~msk);
2218}
2219
2220/**
2221 * Send AEN notification 1923 * Send AEN notification
2222 */ 1924 */
2223static void 1925static void
@@ -2226,32 +1928,14 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2226 union bfa_aen_data_u aen_data; 1928 union bfa_aen_data_u aen_data;
2227 struct bfa_log_mod_s *logmod = ioc->logm; 1929 struct bfa_log_mod_s *logmod = ioc->logm;
2228 s32 inst_num = 0; 1930 s32 inst_num = 0;
2229 struct bfa_ioc_attr_s ioc_attr; 1931 enum bfa_ioc_type_e ioc_type;
2230 1932
2231 switch (event) { 1933 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
2232 case BFA_IOC_AEN_HBGOOD:
2233 bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
2234 break;
2235 case BFA_IOC_AEN_HBFAIL:
2236 bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
2237 break;
2238 case BFA_IOC_AEN_ENABLE:
2239 bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
2240 break;
2241 case BFA_IOC_AEN_DISABLE:
2242 bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
2243 break;
2244 case BFA_IOC_AEN_FWMISMATCH:
2245 bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
2246 break;
2247 default:
2248 break;
2249 }
2250 1934
2251 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); 1935 memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
2252 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); 1936 memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
2253 bfa_ioc_get_attr(ioc, &ioc_attr); 1937 ioc_type = bfa_ioc_get_type(ioc);
2254 switch (ioc_attr.ioc_type) { 1938 switch (ioc_type) {
2255 case BFA_IOC_TYPE_FC: 1939 case BFA_IOC_TYPE_FC:
2256 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); 1940 aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2257 break; 1941 break;
@@ -2263,10 +1947,10 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2263 aen_data.ioc.mac = bfa_ioc_get_mac(ioc); 1947 aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2264 break; 1948 break;
2265 default: 1949 default:
2266 bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC); 1950 bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
2267 break; 1951 break;
2268 } 1952 }
2269 aen_data.ioc.ioc_type = ioc_attr.ioc_type; 1953 aen_data.ioc.ioc_type = ioc_type;
2270} 1954}
2271 1955
2272/** 1956/**
@@ -2290,6 +1974,15 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2290} 1974}
2291 1975
2292/** 1976/**
1977 * Clear saved firmware trace
1978 */
1979void
1980bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
1981{
1982 ioc->dbg_fwsave_once = BFA_TRUE;
1983}
1984
1985/**
2293 * Retrieve saved firmware trace from a prior IOC failure. 1986 * Retrieve saved firmware trace from a prior IOC failure.
2294 */ 1987 */
2295bfa_status_t 1988bfa_status_t
@@ -2304,6 +1997,13 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2304 1997
2305 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1998 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2306 loff = bfa_ioc_smem_pgoff(ioc, loff); 1999 loff = bfa_ioc_smem_pgoff(ioc, loff);
2000
2001 /*
2002 * Hold semaphore to serialize pll init and fwtrc.
2003 */
2004 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
2005 return BFA_STATUS_FAILED;
2006
2307 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); 2007 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2308 2008
2309 tlen = *trclen; 2009 tlen = *trclen;
@@ -2329,6 +2029,12 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2329 } 2029 }
2330 bfa_reg_write(ioc->ioc_regs.host_page_num_fn, 2030 bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2331 bfa_ioc_smem_pgnum(ioc, 0)); 2031 bfa_ioc_smem_pgnum(ioc, 0));
2032
2033 /*
2034 * release semaphore.
2035 */
2036 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2037
2332 bfa_trc(ioc, pgnum); 2038 bfa_trc(ioc, pgnum);
2333 2039
2334 *trclen = tlen * sizeof(u32); 2040 *trclen = tlen * sizeof(u32);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 7c30f05ab137..d0804406ea1a 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -74,15 +74,18 @@ struct bfa_ioc_regs_s {
74 bfa_os_addr_t lpu_mbox_cmd; 74 bfa_os_addr_t lpu_mbox_cmd;
75 bfa_os_addr_t lpu_mbox; 75 bfa_os_addr_t lpu_mbox;
76 bfa_os_addr_t pss_ctl_reg; 76 bfa_os_addr_t pss_ctl_reg;
77 bfa_os_addr_t pss_err_status_reg;
77 bfa_os_addr_t app_pll_fast_ctl_reg; 78 bfa_os_addr_t app_pll_fast_ctl_reg;
78 bfa_os_addr_t app_pll_slow_ctl_reg; 79 bfa_os_addr_t app_pll_slow_ctl_reg;
79 bfa_os_addr_t ioc_sem_reg; 80 bfa_os_addr_t ioc_sem_reg;
80 bfa_os_addr_t ioc_usage_sem_reg; 81 bfa_os_addr_t ioc_usage_sem_reg;
82 bfa_os_addr_t ioc_init_sem_reg;
81 bfa_os_addr_t ioc_usage_reg; 83 bfa_os_addr_t ioc_usage_reg;
82 bfa_os_addr_t host_page_num_fn; 84 bfa_os_addr_t host_page_num_fn;
83 bfa_os_addr_t heartbeat; 85 bfa_os_addr_t heartbeat;
84 bfa_os_addr_t ioc_fwstate; 86 bfa_os_addr_t ioc_fwstate;
85 bfa_os_addr_t ll_halt; 87 bfa_os_addr_t ll_halt;
88 bfa_os_addr_t err_set;
86 bfa_os_addr_t shirq_isr_next; 89 bfa_os_addr_t shirq_isr_next;
87 bfa_os_addr_t shirq_msk_next; 90 bfa_os_addr_t shirq_msk_next;
88 bfa_os_addr_t smem_page_start; 91 bfa_os_addr_t smem_page_start;
@@ -154,7 +157,6 @@ struct bfa_ioc_s {
154 struct bfa_timer_s ioc_timer; 157 struct bfa_timer_s ioc_timer;
155 struct bfa_timer_s sem_timer; 158 struct bfa_timer_s sem_timer;
156 u32 hb_count; 159 u32 hb_count;
157 u32 hb_fail;
158 u32 retry_count; 160 u32 retry_count;
159 struct list_head hb_notify_q; 161 struct list_head hb_notify_q;
160 void *dbg_fwsave; 162 void *dbg_fwsave;
@@ -177,6 +179,22 @@ struct bfa_ioc_s {
177 struct bfi_ioc_attr_s *attr; 179 struct bfi_ioc_attr_s *attr;
178 struct bfa_ioc_cbfn_s *cbfn; 180 struct bfa_ioc_cbfn_s *cbfn;
179 struct bfa_ioc_mbox_mod_s mbox_mod; 181 struct bfa_ioc_mbox_mod_s mbox_mod;
182 struct bfa_ioc_hwif_s *ioc_hwif;
183};
184
185struct bfa_ioc_hwif_s {
186 bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
187 bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
188 void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
189 u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc,
190 u32 off);
191 u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc);
192 void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
193 void (*ioc_map_port) (struct bfa_ioc_s *ioc);
194 void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
195 bfa_boolean_t msix);
196 void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
197 void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
180}; 198};
181 199
182#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 200#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -191,6 +209,15 @@ struct bfa_ioc_s {
191#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) 209#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
192#define bfa_ioc_speed_sup(__ioc) \ 210#define bfa_ioc_speed_sup(__ioc) \
193 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) 211 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
212#define bfa_ioc_get_nports(__ioc) \
213 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
214
215#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
216#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
217
218#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
219#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
220#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
194 221
195/** 222/**
196 * IOC mailbox interface 223 * IOC mailbox interface
@@ -207,6 +234,14 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
207/** 234/**
208 * IOC interfaces 235 * IOC interfaces
209 */ 236 */
237#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
238#define bfa_ioc_isr_mode_set(__ioc, __msix) \
239 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
240#define bfa_ioc_ownership_reset(__ioc) \
241 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
242
243void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
244void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
210void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, 245void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
211 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, 246 struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
212 struct bfa_trc_mod_s *trcmod, 247 struct bfa_trc_mod_s *trcmod,
@@ -223,13 +258,21 @@ bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
223void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); 258void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
224void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); 259void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
225void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); 260void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
226void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
227bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
228bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); 261bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
229bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); 262bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
230bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); 263bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
231bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); 264bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
232void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc); 265void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
266enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
267void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
268void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
269void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
270void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
271void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
272 char *manufacturer);
273void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
274enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
275
233void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); 276void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
234void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, 277void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
235 struct bfa_adapter_attr_s *ad_attr); 278 struct bfa_adapter_attr_s *ad_attr);
@@ -237,6 +280,7 @@ int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
237void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); 280void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
238bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, 281bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
239 int *trclen); 282 int *trclen);
283void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
240bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, 284bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
241 int *trclen); 285 int *trclen);
242u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); 286u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
@@ -245,6 +289,13 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
245bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); 289bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
246void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, 290void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
247 struct bfa_ioc_hbfail_notify_s *notify); 291 struct bfa_ioc_hbfail_notify_s *notify);
292bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
293void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
294void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
295void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
296 struct bfi_ioc_image_hdr_s *fwhdr);
297bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
298 struct bfi_ioc_image_hdr_s *fwhdr);
248 299
249/* 300/*
250 * bfa mfg wwn API functions 301 * bfa mfg wwn API functions
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 000000000000..3ce85319f739
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h>
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_cbreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27
28BFA_TRC_FILE(CNA, IOC_CB);
29
30/*
31 * forward declarations
32 */
33static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
37static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
38static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
39static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
40static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
41static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
42static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
43
44struct bfa_ioc_hwif_s hwif_cb = {
45 bfa_ioc_cb_pll_init,
46 bfa_ioc_cb_firmware_lock,
47 bfa_ioc_cb_firmware_unlock,
48 bfa_ioc_cb_fwimg_get_chunk,
49 bfa_ioc_cb_fwimg_get_size,
50 bfa_ioc_cb_reg_init,
51 bfa_ioc_cb_map_port,
52 bfa_ioc_cb_isr_mode_set,
53 bfa_ioc_cb_notify_hbfail,
54 bfa_ioc_cb_ownership_reset,
55};
56
57/**
58 * Called from bfa_ioc_attach() to map asic specific calls.
59 */
60void
61bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
62{
63 ioc->ioc_hwif = &hwif_cb;
64}
65
66static u32 *
67bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
68{
69 return bfi_image_cb_get_chunk(off);
70}
71
72static u32
73bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
74{
75 return bfi_image_cb_size;
76}
77
78/**
79 * Return true if firmware of current driver matches the running firmware.
80 */
81static bfa_boolean_t
82bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
83{
84 return BFA_TRUE;
85}
86
87static void
88bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
89{
90}
91
92/**
93 * Notify other functions on HB failure.
94 */
95static void
96bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
97{
98 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
99 bfa_reg_read(ioc->ioc_regs.err_set);
100}
101
102/**
103 * Host to LPU mailbox message addresses
104 */
105static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
106 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
107 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
108};
109
110/**
111 * Host <-> LPU mailbox command/status registers
112 */
113static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
114 { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
115 { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
116};
117
118static void
119bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
120{
121 bfa_os_addr_t rb;
122 int pcifn = bfa_ioc_pcifn(ioc);
123
124 rb = bfa_ioc_bar0(ioc);
125
126 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
127 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
128 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
129
130 if (ioc->port_id == 0) {
131 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
132 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
133 } else {
134 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
135 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
136 }
137
138 /**
139 * Host <-> LPU mailbox command/status registers
140 */
141 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
142 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
143
144 /*
145 * PSS control registers
146 */
147 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
148 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
149 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
150 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
151
152 /*
153 * IOC semaphore registers and serialization
154 */
155 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
156 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
157
158 /**
159 * sram memory access
160 */
161 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
162 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
163
164 /*
165 * err set reg : for notification of hb failure
166 */
167 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
168}
169
170/**
171 * Initialize IOC to port mapping.
172 */
173static void
174bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
175{
176 /**
177 * For crossbow, port id is same as pci function.
178 */
179 ioc->port_id = bfa_ioc_pcifn(ioc);
180 bfa_trc(ioc, ioc->port_id);
181}
182
183/**
184 * Set interrupt mode for a function: INTX or MSIX
185 */
186static void
187bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
188{
189}
190
191static bfa_status_t
192bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
193{
194 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
195 u32 pll_sclk, pll_fclk;
196
197 /*
198 * Hold semaphore so that nobody can access the chip during init.
199 */
200 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
201
202 pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
203 __APP_PLL_212_P0_1(3U) |
204 __APP_PLL_212_JITLMT0_1(3U) |
205 __APP_PLL_212_CNTLMT0_1(3U);
206 pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
207 __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
208 __APP_PLL_400_JITLMT0_1(3U) |
209 __APP_PLL_400_CNTLMT0_1(3U);
210
211 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
212 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
213
214 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
215 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
216 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
217 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
218 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
219 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
220
221 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
222 __APP_PLL_212_LOGIC_SOFT_RESET);
223 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
224 __APP_PLL_212_BYPASS |
225 __APP_PLL_212_LOGIC_SOFT_RESET);
226 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
227 __APP_PLL_400_LOGIC_SOFT_RESET);
228 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
229 __APP_PLL_400_BYPASS |
230 __APP_PLL_400_LOGIC_SOFT_RESET);
231 bfa_os_udelay(2);
232 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
233 __APP_PLL_212_LOGIC_SOFT_RESET);
234 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
235 __APP_PLL_400_LOGIC_SOFT_RESET);
236
237 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
238 pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
239 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
240 pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
241
242 /**
243 * Wait for PLLs to lock.
244 */
245 bfa_os_udelay(2000);
246 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
247 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
248
249 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
250 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
251
252 /*
253 * release semaphore.
254 */
255 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
256
257 return BFA_STATUS_OK;
258}
259
260/**
261 * Cleanup hw semaphore and usecnt registers
262 */
263static void
264bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
265{
266
267 /*
268 * Read the hw sem reg to make sure that it is locked
269 * before we clear it. If it is not locked, writing 1
270 * will lock it instead of clearing it.
271 */
272 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
273 bfa_ioc_hw_sem_release(ioc);
274}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 000000000000..20b58ad5f95c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,423 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <bfa.h>
19#include <bfa_ioc.h>
20#include <bfa_fwimg_priv.h>
21#include <cna/bfa_cna_trcmod.h>
22#include <cs/bfa_debug.h>
23#include <bfi/bfi_ioc.h>
24#include <bfi/bfi_ctreg.h>
25#include <log/bfa_log_hal.h>
26#include <defs/bfa_defs_pci.h>
27
28BFA_TRC_FILE(CNA, IOC_CT);
29
30/*
31 * forward declarations
32 */
33static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
34static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
35static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
36static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
37 u32 off);
38static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
39static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
40static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
42static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
44
45struct bfa_ioc_hwif_s hwif_ct = {
46 bfa_ioc_ct_pll_init,
47 bfa_ioc_ct_firmware_lock,
48 bfa_ioc_ct_firmware_unlock,
49 bfa_ioc_ct_fwimg_get_chunk,
50 bfa_ioc_ct_fwimg_get_size,
51 bfa_ioc_ct_reg_init,
52 bfa_ioc_ct_map_port,
53 bfa_ioc_ct_isr_mode_set,
54 bfa_ioc_ct_notify_hbfail,
55 bfa_ioc_ct_ownership_reset,
56};
57
58/**
59 * Called from bfa_ioc_attach() to map asic specific calls.
60 */
61void
62bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
63{
64 ioc->ioc_hwif = &hwif_ct;
65}
66
67static u32*
68bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
69{
70 return bfi_image_ct_get_chunk(off);
71}
72
73static u32
74bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
75{
76 return bfi_image_ct_size;
77}
78
79/**
80 * Return true if firmware of current driver matches the running firmware.
81 */
82static bfa_boolean_t
83bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
84{
85 enum bfi_ioc_state ioc_fwstate;
86 u32 usecnt;
87 struct bfi_ioc_image_hdr_s fwhdr;
88
89 /**
90 * Firmware match check is relevant only for CNA.
91 */
92 if (!ioc->cna)
93 return BFA_TRUE;
94
95 /**
96 * If bios boot (flash based) -- do not increment usage count
97 */
98 if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
99 return BFA_TRUE;
100
101 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
102 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
103
104 /**
105 * If usage count is 0, always return TRUE.
106 */
107 if (usecnt == 0) {
108 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
109 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
110 bfa_trc(ioc, usecnt);
111 return BFA_TRUE;
112 }
113
114 ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
115 bfa_trc(ioc, ioc_fwstate);
116
117 /**
118 * Use count cannot be non-zero and chip in uninitialized state.
119 */
120 bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
121
122 /**
123 * Check if another driver with a different firmware is active
124 */
125 bfa_ioc_fwver_get(ioc, &fwhdr);
126 if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
127 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
128 bfa_trc(ioc, usecnt);
129 return BFA_FALSE;
130 }
131
132 /**
133 * Same firmware version. Increment the reference count.
134 */
135 usecnt++;
136 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
137 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
138 bfa_trc(ioc, usecnt);
139 return BFA_TRUE;
140}
141
142static void
143bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
144{
145 u32 usecnt;
146
147 /**
148 * Firmware lock is relevant only for CNA.
149 * If bios boot (flash based) -- do not decrement usage count
150 */
151 if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
152 return;
153
154 /**
155 * decrement usage count
156 */
157 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
158 usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
159 bfa_assert(usecnt > 0);
160
161 usecnt--;
162 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
163 bfa_trc(ioc, usecnt);
164
165 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
166}
167
168/**
169 * Notify other functions on HB failure.
170 */
171static void
172bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
173{
174 if (ioc->cna) {
175 bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
176 /* Wait for halt to take effect */
177 bfa_reg_read(ioc->ioc_regs.ll_halt);
178 } else {
179 bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
180 bfa_reg_read(ioc->ioc_regs.err_set);
181 }
182}
183
184/**
185 * Host to LPU mailbox message addresses
186 */
187static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
188 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
189 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
190 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
191 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
192};
193
194/**
195 * Host <-> LPU mailbox command/status registers - port 0
196 */
197static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
198 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
199 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
200 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
201 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
202};
203
204/**
205 * Host <-> LPU mailbox command/status registers - port 1
206 */
207static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
208 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
209 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
210 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
211 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
212};
213
214static void
215bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
216{
217 bfa_os_addr_t rb;
218 int pcifn = bfa_ioc_pcifn(ioc);
219
220 rb = bfa_ioc_bar0(ioc);
221
222 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
223 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
224 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
225
226 if (ioc->port_id == 0) {
227 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
228 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
229 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
230 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
231 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
232 } else {
233 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
234 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
235 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
236 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
237 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
238 }
239
240 /*
241 * PSS control registers
242 */
243 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
244 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
245 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
246 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
247
248 /*
249 * IOC semaphore registers and serialization
250 */
251 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
252 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
253 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
254 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
255
256 /**
257 * sram memory access
258 */
259 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
260 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
261
262 /*
263 * err set reg : for notification of hb failure in fcmode
264 */
265 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
266}
267
268/**
269 * Initialize IOC to port mapping.
270 */
271
272#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
273static void
274bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
275{
276 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
277 u32 r32;
278
279 /**
280 * For catapult, base port id on personality register and IOC type
281 */
282 r32 = bfa_reg_read(rb + FNC_PERS_REG);
283 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
284 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
285
286 bfa_trc(ioc, bfa_ioc_pcifn(ioc));
287 bfa_trc(ioc, ioc->port_id);
288}
289
290/**
291 * Set interrupt mode for a function: INTX or MSIX
292 */
293static void
294bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
295{
296 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
297 u32 r32, mode;
298
299 r32 = bfa_reg_read(rb + FNC_PERS_REG);
300 bfa_trc(ioc, r32);
301
302 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
303 __F0_INTX_STATUS;
304
305 /**
306 * If already in desired mode, do not change anything
307 */
308 if (!msix && mode)
309 return;
310
311 if (msix)
312 mode = __F0_INTX_STATUS_MSIX;
313 else
314 mode = __F0_INTX_STATUS_INTA;
315
316 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
317 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
318 bfa_trc(ioc, r32);
319
320 bfa_reg_write(rb + FNC_PERS_REG, r32);
321}
322
323static bfa_status_t
324bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
325{
326 bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
327 u32 pll_sclk, pll_fclk, r32;
328
329 /*
330 * Hold semaphore so that nobody can access the chip during init.
331 */
332 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
333
334 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
335 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
336 __APP_PLL_312_JITLMT0_1(3U) |
337 __APP_PLL_312_CNTLMT0_1(1U);
338 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
339 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
340 __APP_PLL_425_JITLMT0_1(3U) |
341 __APP_PLL_425_CNTLMT0_1(1U);
342
343 /**
344 * For catapult, choose operational mode FC/FCoE
345 */
346 if (ioc->fcmode) {
347 bfa_reg_write((rb + OP_MODE), 0);
348 bfa_reg_write((rb + ETH_MAC_SER_REG),
349 __APP_EMS_CMLCKSEL |
350 __APP_EMS_REFCKBUFEN2 |
351 __APP_EMS_CHANNEL_SEL);
352 } else {
353 ioc->pllinit = BFA_TRUE;
354 bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
355 bfa_reg_write((rb + ETH_MAC_SER_REG),
356 __APP_EMS_REFCKBUFEN1);
357 }
358
359 bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
360 bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
361
362 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
363 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
364 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
365 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
366 bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
367 bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
368
369 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
370 __APP_PLL_312_LOGIC_SOFT_RESET);
371 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
372 __APP_PLL_425_LOGIC_SOFT_RESET);
373 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
374 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
375 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
376 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
377
378 /**
379 * Wait for PLLs to lock.
380 */
381 bfa_reg_read(rb + HOSTFN0_INT_MSK);
382 bfa_os_udelay(2000);
383 bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
384 bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
385
386 bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
387 __APP_PLL_312_ENABLE);
388 bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
389 __APP_PLL_425_ENABLE);
390
391 bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
392 bfa_os_udelay(1000);
393 r32 = bfa_reg_read((rb + MBIST_STAT_REG));
394 bfa_trc(ioc, r32);
395 /*
396 * release semaphore.
397 */
398 bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
399
400 return BFA_STATUS_OK;
401}
402
403/**
404 * Cleanup hw semaphore and usecnt registers
405 */
406static void
407bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
408{
409
410 if (ioc->cna) {
411 bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
412 bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
413 bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
414 }
415
416 /*
417 * Read the hw sem reg to make sure that it is locked
418 * before we clear it. If it is not locked, writing 1
419 * will lock it instead of clearing it.
420 */
421 bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
422 bfa_ioc_hw_sem_release(ioc);
423}
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index d7ab792a9e54..a76de2669bfc 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -172,6 +172,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
172 */ 172 */
173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { 173 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
174 iocfc->hwif.hw_reginit = bfa_hwct_reginit; 174 iocfc->hwif.hw_reginit = bfa_hwct_reginit;
175 iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
175 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; 176 iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
176 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; 177 iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
177 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; 178 iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
@@ -180,6 +181,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
180 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; 181 iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
181 } else { 182 } else {
182 iocfc->hwif.hw_reginit = bfa_hwcb_reginit; 183 iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
184 iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
183 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; 185 iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
184 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; 186 iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
185 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; 187 iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
@@ -336,8 +338,10 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
336 bfa_cb_init(bfa->bfad, BFA_STATUS_OK); 338 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
337 else 339 else
338 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); 340 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
339 } else 341 } else {
340 bfa->iocfc.action = BFA_IOCFC_ACT_NONE; 342 if (bfa->iocfc.cfgdone)
343 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
344 }
341} 345}
342 346
343static void 347static void
@@ -619,8 +623,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
619 623
620 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, 624 bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
621 bfa->trcmod, bfa->aen, bfa->logm); 625 bfa->trcmod, bfa->aen, bfa->logm);
622 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
623 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
624 626
625 /** 627 /**
626 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode. 628 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
@@ -628,6 +630,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
628 if (0) 630 if (0)
629 bfa_ioc_set_fcmode(&bfa->ioc); 631 bfa_ioc_set_fcmode(&bfa->ioc);
630 632
633 bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
634 bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
635
631 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); 636 bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
632 bfa_iocfc_mem_claim(bfa, cfg, meminfo); 637 bfa_iocfc_mem_claim(bfa, cfg, meminfo);
633 bfa_timer_init(&bfa->timer_mod); 638 bfa_timer_init(&bfa->timer_mod);
@@ -654,7 +659,6 @@ bfa_iocfc_init(struct bfa_s *bfa)
654{ 659{
655 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 660 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
656 bfa_ioc_enable(&bfa->ioc); 661 bfa_ioc_enable(&bfa->ioc);
657 bfa_msix_install(bfa);
658} 662}
659 663
660/** 664/**
@@ -797,6 +801,11 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
797 return BFA_STATUS_DEVBUSY; 801 return BFA_STATUS_DEVBUSY;
798 } 802 }
799 803
804 if (!bfa_iocfc_is_operational(bfa)) {
805 bfa_trc(bfa, 0);
806 return BFA_STATUS_IOC_NON_OP;
807 }
808
800 iocfc->stats_busy = BFA_TRUE; 809 iocfc->stats_busy = BFA_TRUE;
801 iocfc->stats_ret = stats; 810 iocfc->stats_ret = stats;
802 iocfc->stats_cbfn = cbfn; 811 iocfc->stats_cbfn = cbfn;
@@ -817,6 +826,11 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
817 return BFA_STATUS_DEVBUSY; 826 return BFA_STATUS_DEVBUSY;
818 } 827 }
819 828
829 if (!bfa_iocfc_is_operational(bfa)) {
830 bfa_trc(bfa, 0);
831 return BFA_STATUS_IOC_NON_OP;
832 }
833
820 iocfc->stats_busy = BFA_TRUE; 834 iocfc->stats_busy = BFA_TRUE;
821 iocfc->stats_cbfn = cbfn; 835 iocfc->stats_cbfn = cbfn;
822 iocfc->stats_cbarg = cbarg; 836 iocfc->stats_cbarg = cbarg;
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index ce9a830a4207..fbb4bdc9d600 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -54,6 +54,7 @@ struct bfa_msix_s {
54 */ 54 */
55struct bfa_hwif_s { 55struct bfa_hwif_s {
56 void (*hw_reginit)(struct bfa_s *bfa); 56 void (*hw_reginit)(struct bfa_s *bfa);
57 void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
57 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); 58 void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
58 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); 59 void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
59 void (*hw_msix_install)(struct bfa_s *bfa); 60 void (*hw_msix_install)(struct bfa_s *bfa);
@@ -143,6 +144,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
143void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); 144void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
144 145
145void bfa_hwcb_reginit(struct bfa_s *bfa); 146void bfa_hwcb_reginit(struct bfa_s *bfa);
147void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
146void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); 148void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
147void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); 149void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
148void bfa_hwcb_msix_install(struct bfa_s *bfa); 150void bfa_hwcb_msix_install(struct bfa_s *bfa);
@@ -151,6 +153,7 @@ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
151void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, 153void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
152 u32 *nvecs, u32 *maxvec); 154 u32 *nvecs, u32 *maxvec);
153void bfa_hwct_reginit(struct bfa_s *bfa); 155void bfa_hwct_reginit(struct bfa_s *bfa);
156void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
154void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); 157void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
155void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); 158void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
156void bfa_hwct_msix_install(struct bfa_s *bfa); 159void bfa_hwct_msix_install(struct bfa_s *bfa);
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index f81d359b7089..5b107abe46e5 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -149,7 +149,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
149 break; 149 break;
150 150
151 default: 151 default:
152 bfa_assert(0); 152 bfa_sm_fault(ioim->bfa, event);
153 } 153 }
154} 154}
155 155
@@ -194,7 +194,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
194 break; 194 break;
195 195
196 default: 196 default:
197 bfa_assert(0); 197 bfa_sm_fault(ioim->bfa, event);
198 } 198 }
199} 199}
200 200
@@ -259,7 +259,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
259 break; 259 break;
260 260
261 default: 261 default:
262 bfa_assert(0); 262 bfa_sm_fault(ioim->bfa, event);
263 } 263 }
264} 264}
265 265
@@ -317,7 +317,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
317 break; 317 break;
318 318
319 default: 319 default:
320 bfa_assert(0); 320 bfa_sm_fault(ioim->bfa, event);
321 } 321 }
322} 322}
323 323
@@ -377,7 +377,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
377 break; 377 break;
378 378
379 default: 379 default:
380 bfa_assert(0); 380 bfa_sm_fault(ioim->bfa, event);
381 } 381 }
382} 382}
383 383
@@ -419,7 +419,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
419 break; 419 break;
420 420
421 default: 421 default:
422 bfa_assert(0); 422 bfa_sm_fault(ioim->bfa, event);
423 } 423 }
424} 424}
425 425
@@ -467,7 +467,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
467 break; 467 break;
468 468
469 default: 469 default:
470 bfa_assert(0); 470 bfa_sm_fault(ioim->bfa, event);
471 } 471 }
472} 472}
473 473
@@ -516,7 +516,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
516 break; 516 break;
517 517
518 default: 518 default:
519 bfa_assert(0); 519 bfa_sm_fault(ioim->bfa, event);
520 } 520 }
521} 521}
522 522
@@ -544,7 +544,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
544 break; 544 break;
545 545
546 default: 546 default:
547 bfa_assert(0); 547 bfa_sm_fault(ioim->bfa, event);
548 } 548 }
549} 549}
550 550
@@ -577,7 +577,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
577 break; 577 break;
578 578
579 default: 579 default:
580 bfa_assert(0); 580 bfa_sm_fault(ioim->bfa, event);
581 } 581 }
582} 582}
583 583
@@ -605,7 +605,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
605 break; 605 break;
606 606
607 default: 607 default:
608 bfa_assert(0); 608 bfa_sm_fault(ioim->bfa, event);
609 } 609 }
610} 610}
611 611
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
index eabf7d38bd09..a914ff255135 100644
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -144,7 +144,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
144 break; 144 break;
145 145
146 default: 146 default:
147 bfa_assert(0); 147 bfa_sm_fault(itnim->bfa, event);
148 } 148 }
149} 149}
150 150
@@ -175,7 +175,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
175 break; 175 break;
176 176
177 default: 177 default:
178 bfa_assert(0); 178 bfa_sm_fault(itnim->bfa, event);
179 } 179 }
180} 180}
181 181
@@ -212,7 +212,7 @@ bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
212 break; 212 break;
213 213
214 default: 214 default:
215 bfa_assert(0); 215 bfa_sm_fault(itnim->bfa, event);
216 } 216 }
217} 217}
218 218
@@ -247,7 +247,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
247 break; 247 break;
248 248
249 default: 249 default:
250 bfa_assert(0); 250 bfa_sm_fault(itnim->bfa, event);
251 } 251 }
252} 252}
253 253
@@ -275,7 +275,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
275 break; 275 break;
276 276
277 default: 277 default:
278 bfa_assert(0); 278 bfa_sm_fault(itnim->bfa, event);
279 } 279 }
280} 280}
281 281
@@ -317,7 +317,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
317 break; 317 break;
318 318
319 default: 319 default:
320 bfa_assert(0); 320 bfa_sm_fault(itnim->bfa, event);
321 } 321 }
322} 322}
323 323
@@ -348,7 +348,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
348 break; 348 break;
349 349
350 default: 350 default:
351 bfa_assert(0); 351 bfa_sm_fault(itnim->bfa, event);
352 } 352 }
353} 353}
354 354
@@ -385,7 +385,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
385 break; 385 break;
386 386
387 default: 387 default:
388 bfa_assert(0); 388 bfa_sm_fault(itnim->bfa, event);
389 } 389 }
390} 390}
391 391
@@ -413,7 +413,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
413 break; 413 break;
414 414
415 default: 415 default:
416 bfa_assert(0); 416 bfa_sm_fault(itnim->bfa, event);
417 } 417 }
418} 418}
419 419
@@ -442,7 +442,7 @@ bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
442 break; 442 break;
443 443
444 default: 444 default:
445 bfa_assert(0); 445 bfa_sm_fault(itnim->bfa, event);
446 } 446 }
447} 447}
448 448
@@ -470,7 +470,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
470 break; 470 break;
471 471
472 default: 472 default:
473 bfa_assert(0); 473 bfa_sm_fault(itnim->bfa, event);
474 } 474 }
475} 475}
476 476
@@ -502,7 +502,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
502 break; 502 break;
503 503
504 default: 504 default:
505 bfa_assert(0); 505 bfa_sm_fault(itnim->bfa, event);
506 } 506 }
507} 507}
508 508
@@ -538,7 +538,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
538 break; 538 break;
539 539
540 default: 540 default:
541 bfa_assert(0); 541 bfa_sm_fault(itnim->bfa, event);
542 } 542 }
543} 543}
544 544
@@ -559,7 +559,7 @@ bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
559 break; 559 break;
560 560
561 default: 561 default:
562 bfa_assert(0); 562 bfa_sm_fault(itnim->bfa, event);
563 } 563 }
564} 564}
565 565
@@ -583,7 +583,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
583 break; 583 break;
584 584
585 default: 585 default:
586 bfa_assert(0); 586 bfa_sm_fault(itnim->bfa, event);
587 } 587 }
588} 588}
589 589
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
index 9844b45412b6..ad06f6189092 100644
--- a/drivers/scsi/bfa/bfa_lps.c
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -18,6 +18,7 @@
18#include <bfa.h> 18#include <bfa.h>
19#include <bfi/bfi_lps.h> 19#include <bfi/bfi_lps.h>
20#include <cs/bfa_debug.h> 20#include <cs/bfa_debug.h>
21#include <defs/bfa_defs_pci.h>
21 22
22BFA_TRC_FILE(HAL, LPS); 23BFA_TRC_FILE(HAL, LPS);
23BFA_MODULE(lps); 24BFA_MODULE(lps);
@@ -25,6 +26,12 @@ BFA_MODULE(lps);
25#define BFA_LPS_MIN_LPORTS (1) 26#define BFA_LPS_MIN_LPORTS (1)
26#define BFA_LPS_MAX_LPORTS (256) 27#define BFA_LPS_MAX_LPORTS (256)
27 28
29/*
30 * Maximum Vports supported per physical port or vf.
31 */
32#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
33#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
34
28/** 35/**
29 * forward declarations 36 * forward declarations
30 */ 37 */
@@ -49,7 +56,7 @@ static void bfa_lps_send_login(struct bfa_lps_s *lps);
49static void bfa_lps_send_logout(struct bfa_lps_s *lps); 56static void bfa_lps_send_logout(struct bfa_lps_s *lps);
50static void bfa_lps_login_comp(struct bfa_lps_s *lps); 57static void bfa_lps_login_comp(struct bfa_lps_s *lps);
51static void bfa_lps_logout_comp(struct bfa_lps_s *lps); 58static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
52 59static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
53 60
54/** 61/**
55 * lps_pvt BFA LPS private functions 62 * lps_pvt BFA LPS private functions
@@ -62,6 +69,7 @@ enum bfa_lps_event {
62 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ 69 BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
63 BFA_LPS_SM_DELETE = 5, /* lps delete from user */ 70 BFA_LPS_SM_DELETE = 5, /* lps delete from user */
64 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ 71 BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
72 BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
65}; 73};
66 74
67static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); 75static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -91,6 +99,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
91 bfa_sm_set_state(lps, bfa_lps_sm_login); 99 bfa_sm_set_state(lps, bfa_lps_sm_login);
92 bfa_lps_send_login(lps); 100 bfa_lps_send_login(lps);
93 } 101 }
102 if (lps->fdisc)
103 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
104 BFA_PL_EID_LOGIN, 0, "FDISC Request");
105 else
106 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
107 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
94 break; 108 break;
95 109
96 case BFA_LPS_SM_LOGOUT: 110 case BFA_LPS_SM_LOGOUT:
@@ -101,6 +115,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
101 bfa_lps_free(lps); 115 bfa_lps_free(lps);
102 break; 116 break;
103 117
118 case BFA_LPS_SM_RX_CVL:
104 case BFA_LPS_SM_OFFLINE: 119 case BFA_LPS_SM_OFFLINE:
105 break; 120 break;
106 121
@@ -112,7 +127,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
112 break; 127 break;
113 128
114 default: 129 default:
115 bfa_assert(0); 130 bfa_sm_fault(lps->bfa, event);
116 } 131 }
117} 132}
118 133
@@ -127,10 +142,25 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
127 142
128 switch (event) { 143 switch (event) {
129 case BFA_LPS_SM_FWRSP: 144 case BFA_LPS_SM_FWRSP:
130 if (lps->status == BFA_STATUS_OK) 145 if (lps->status == BFA_STATUS_OK) {
131 bfa_sm_set_state(lps, bfa_lps_sm_online); 146 bfa_sm_set_state(lps, bfa_lps_sm_online);
132 else 147 if (lps->fdisc)
148 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
149 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
150 else
151 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
152 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
153 } else {
133 bfa_sm_set_state(lps, bfa_lps_sm_init); 154 bfa_sm_set_state(lps, bfa_lps_sm_init);
155 if (lps->fdisc)
156 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
157 BFA_PL_EID_LOGIN, 0,
158 "FDISC Fail (RJT or timeout)");
159 else
160 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
161 BFA_PL_EID_LOGIN, 0,
162 "FLOGI Fail (RJT or timeout)");
163 }
134 bfa_lps_login_comp(lps); 164 bfa_lps_login_comp(lps);
135 break; 165 break;
136 166
@@ -139,7 +169,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
139 break; 169 break;
140 170
141 default: 171 default:
142 bfa_assert(0); 172 bfa_sm_fault(lps->bfa, event);
143 } 173 }
144} 174}
145 175
@@ -162,8 +192,16 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
162 bfa_reqq_wcancel(&lps->wqe); 192 bfa_reqq_wcancel(&lps->wqe);
163 break; 193 break;
164 194
195 case BFA_LPS_SM_RX_CVL:
196 /*
197 * Login was not even sent out; so when getting out
198 * of this state, it will appear like a login retry
199 * after Clear virtual link
200 */
201 break;
202
165 default: 203 default:
166 bfa_assert(0); 204 bfa_sm_fault(lps->bfa, event);
167 } 205 }
168} 206}
169 207
@@ -185,6 +223,17 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
185 bfa_sm_set_state(lps, bfa_lps_sm_logout); 223 bfa_sm_set_state(lps, bfa_lps_sm_logout);
186 bfa_lps_send_logout(lps); 224 bfa_lps_send_logout(lps);
187 } 225 }
226 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
227 BFA_PL_EID_LOGO, 0, "Logout");
228 break;
229
230 case BFA_LPS_SM_RX_CVL:
231 bfa_sm_set_state(lps, bfa_lps_sm_init);
232
233 /* Let the vport module know about this event */
234 bfa_lps_cvl_event(lps);
235 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
236 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
188 break; 237 break;
189 238
190 case BFA_LPS_SM_OFFLINE: 239 case BFA_LPS_SM_OFFLINE:
@@ -193,7 +242,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
193 break; 242 break;
194 243
195 default: 244 default:
196 bfa_assert(0); 245 bfa_sm_fault(lps->bfa, event);
197 } 246 }
198} 247}
199 248
@@ -217,7 +266,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
217 break; 266 break;
218 267
219 default: 268 default:
220 bfa_assert(0); 269 bfa_sm_fault(lps->bfa, event);
221 } 270 }
222} 271}
223 272
@@ -242,7 +291,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
242 break; 291 break;
243 292
244 default: 293 default:
245 bfa_assert(0); 294 bfa_sm_fault(lps->bfa, event);
246 } 295 }
247} 296}
248 297
@@ -396,6 +445,20 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
396} 445}
397 446
398/** 447/**
448 * Firmware received a Clear virtual link request (for FCoE)
449 */
450static void
451bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
452{
453 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
454 struct bfa_lps_s *lps;
455
456 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
457
458 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
459}
460
461/**
399 * Space is available in request queue, resume queueing request to firmware. 462 * Space is available in request queue, resume queueing request to firmware.
400 */ 463 */
401static void 464static void
@@ -531,7 +594,48 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
531 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); 594 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
532} 595}
533 596
597/**
598 * Clear virtual link completion handler for non-fcs
599 */
600static void
601bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
602{
603 struct bfa_lps_s *lps = arg;
604
605 if (!complete)
606 return;
607
608 /* Clear virtual link to base port will result in link down */
609 if (lps->fdisc)
610 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
611}
612
613/**
614 * Received Clear virtual link event --direct call for fcs,
615 * queue for others
616 */
617static void
618bfa_lps_cvl_event(struct bfa_lps_s *lps)
619{
620 if (!lps->bfa->fcs) {
621 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
622 lps);
623 return;
624 }
625
626 /* Clear virtual link to base port will result in link down */
627 if (lps->fdisc)
628 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
629}
534 630
631u32
632bfa_lps_get_max_vport(struct bfa_s *bfa)
633{
634 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
635 return BFA_LPS_MAX_VPORTS_SUPP_CT;
636 else
637 return BFA_LPS_MAX_VPORTS_SUPP_CB;
638}
535 639
536/** 640/**
537 * lps_public BFA LPS public functions 641 * lps_public BFA LPS public functions
@@ -752,6 +856,14 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
752 return lps->lsrjt_expl; 856 return lps->lsrjt_expl;
753} 857}
754 858
859/**
860 * Return fpma/spma MAC for lport
861 */
862struct mac_s
863bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
864{
865 return lps->lp_mac;
866}
755 867
756/** 868/**
757 * LPS firmware message class handler. 869 * LPS firmware message class handler.
@@ -773,6 +885,10 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
773 bfa_lps_logout_rsp(bfa, msg.logout_rsp); 885 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
774 break; 886 break;
775 887
888 case BFI_LPS_H2I_CVL_EVENT:
889 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
890 break;
891
776 default: 892 default:
777 bfa_trc(bfa, m->mhdr.msg_id); 893 bfa_trc(bfa, m->mhdr.msg_id);
778 bfa_assert(0); 894 bfa_assert(0);
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c
index 32eda8e1ec65..a7fcc80c177e 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_module.c
@@ -24,7 +24,7 @@
24 */ 24 */
25struct bfa_module_s *hal_mods[] = { 25struct bfa_module_s *hal_mods[] = {
26 &hal_mod_sgpg, 26 &hal_mod_sgpg,
27 &hal_mod_pport, 27 &hal_mod_fcport,
28 &hal_mod_fcxp, 28 &hal_mod_fcxp,
29 &hal_mod_lps, 29 &hal_mod_lps,
30 &hal_mod_uf, 30 &hal_mod_uf,
@@ -45,7 +45,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
45 bfa_isr_unhandled, /* BFI_MC_DIAG */ 45 bfa_isr_unhandled, /* BFI_MC_DIAG */
46 bfa_isr_unhandled, /* BFI_MC_FLASH */ 46 bfa_isr_unhandled, /* BFI_MC_FLASH */
47 bfa_isr_unhandled, /* BFI_MC_CEE */ 47 bfa_isr_unhandled, /* BFI_MC_CEE */
48 bfa_pport_isr, /* BFI_MC_PORT */ 48 bfa_fcport_isr, /* BFI_MC_FCPORT */
49 bfa_isr_unhandled, /* BFI_MC_IOCFC */ 49 bfa_isr_unhandled, /* BFI_MC_IOCFC */
50 bfa_isr_unhandled, /* BFI_MC_LL */ 50 bfa_isr_unhandled, /* BFI_MC_LL */
51 bfa_uf_isr, /* BFI_MC_UF */ 51 bfa_uf_isr, /* BFI_MC_UF */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
index 96f70534593c..f554c2fad6a9 100644
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ b/drivers/scsi/bfa/bfa_modules_priv.h
@@ -29,7 +29,7 @@
29 29
30 30
31struct bfa_modules_s { 31struct bfa_modules_s {
32 struct bfa_pport_s pport; /* physical port module */ 32 struct bfa_fcport_s fcport; /* fc port module */
33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ 33 struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
34 struct bfa_lps_mod_s lps_mod; /* fcxp module */ 34 struct bfa_lps_mod_s lps_mod; /* fcxp module */
35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ 35 struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 51f698a06b6d..40e256ec67ff 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -23,9 +23,19 @@
23#include "bfa_intr_priv.h" 23#include "bfa_intr_priv.h"
24 24
25/** 25/**
26 * BFA physical port data structure 26 * Link notification data structure
27 */ 27 */
28struct bfa_pport_s { 28struct bfa_fcport_ln_s {
29 struct bfa_fcport_s *fcport;
30 bfa_sm_t sm;
31 struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
32 enum bfa_pport_linkstate ln_event; /* ln event for callback */
33};
34
35/**
36 * BFA FC port data structure
37 */
38struct bfa_fcport_s {
29 struct bfa_s *bfa; /* parent BFA instance */ 39 struct bfa_s *bfa; /* parent BFA instance */
30 bfa_sm_t sm; /* port state machine */ 40 bfa_sm_t sm; /* port state machine */
31 wwn_t nwwn; /* node wwn of physical port */ 41 wwn_t nwwn; /* node wwn of physical port */
@@ -36,6 +46,8 @@ struct bfa_pport_s {
36 enum bfa_pport_topology topology; /* current topology */ 46 enum bfa_pport_topology topology; /* current topology */
37 u8 myalpa; /* my ALPA in LOOP topology */ 47 u8 myalpa; /* my ALPA in LOOP topology */
38 u8 rsvd[3]; 48 u8 rsvd[3];
49 u32 mypid:24;
50 u32 rsvd_b:8;
39 struct bfa_pport_cfg_s cfg; /* current port configuration */ 51 struct bfa_pport_cfg_s cfg; /* current port configuration */
40 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ 52 struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
41 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ 53 struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
@@ -49,42 +61,31 @@ struct bfa_pport_s {
49 void (*event_cbfn) (void *cbarg, 61 void (*event_cbfn) (void *cbarg,
50 bfa_pport_event_t event); 62 bfa_pport_event_t event);
51 union { 63 union {
52 union bfi_pport_i2h_msg_u i2hmsg; 64 union bfi_fcport_i2h_msg_u i2hmsg;
53 } event_arg; 65 } event_arg;
54 void *bfad; /* BFA driver handle */ 66 void *bfad; /* BFA driver handle */
67 struct bfa_fcport_ln_s ln; /* Link Notification */
55 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ 68 struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
56 enum bfa_pport_linkstate hcb_event; 69 struct bfa_timer_s timer; /* timer */
57 /* link event for callback */
58 u32 msgtag; /* fimrware msg tag for reply */ 70 u32 msgtag; /* fimrware msg tag for reply */
59 u8 *stats_kva; 71 u8 *stats_kva;
60 u64 stats_pa; 72 u64 stats_pa;
61 union bfa_pport_stats_u *stats; /* pport stats */ 73 union bfa_fcport_stats_u *stats;
62 u32 mypid:24; 74 union bfa_fcport_stats_u *stats_ret; /* driver stats location */
63 u32 rsvd_b:8; 75 bfa_status_t stats_status; /* stats/statsclr status */
64 struct bfa_timer_s timer; /* timer */ 76 bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
65 union bfa_pport_stats_u *stats_ret; 77 bfa_boolean_t stats_qfull;
66 /* driver stats location */ 78 bfa_cb_pport_t stats_cbfn; /* driver callback function */
67 bfa_status_t stats_status; 79 void *stats_cbarg; /* *!< user callback arg */
68 /* stats/statsclr status */ 80 bfa_boolean_t diag_busy; /* diag busy status */
69 bfa_boolean_t stats_busy; 81 bfa_boolean_t beacon; /* port beacon status */
70 /* outstanding stats/statsclr */ 82 bfa_boolean_t link_e2e_beacon; /* link beacon status */
71 bfa_boolean_t stats_qfull;
72 bfa_boolean_t diag_busy;
73 /* diag busy status */
74 bfa_boolean_t beacon;
75 /* port beacon status */
76 bfa_boolean_t link_e2e_beacon;
77 /* link beacon status */
78 bfa_cb_pport_t stats_cbfn;
79 /* driver callback function */
80 void *stats_cbarg;
81 /* *!< user callback arg */
82}; 83};
83 84
84#define BFA_PORT_MOD(__bfa) (&(__bfa)->modules.pport) 85#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
85 86
86/* 87/*
87 * public functions 88 * public functions
88 */ 89 */
89void bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); 90void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
90#endif /* __BFA_PORT_PRIV_H__ */ 91#endif /* __BFA_PORT_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
index 0747a6b26f7b..be80fc7e1b0e 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -101,7 +101,7 @@ extern bfa_boolean_t bfa_auto_recover;
101extern struct bfa_module_s hal_mod_flash; 101extern struct bfa_module_s hal_mod_flash;
102extern struct bfa_module_s hal_mod_fcdiag; 102extern struct bfa_module_s hal_mod_fcdiag;
103extern struct bfa_module_s hal_mod_sgpg; 103extern struct bfa_module_s hal_mod_sgpg;
104extern struct bfa_module_s hal_mod_pport; 104extern struct bfa_module_s hal_mod_fcport;
105extern struct bfa_module_s hal_mod_fcxp; 105extern struct bfa_module_s hal_mod_fcxp;
106extern struct bfa_module_s hal_mod_lps; 106extern struct bfa_module_s hal_mod_lps;
107extern struct bfa_module_s hal_mod_uf; 107extern struct bfa_module_s hal_mod_uf;
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 3e1990a74258..7c509fa244e4 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -114,7 +114,7 @@ bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
114 114
115 default: 115 default:
116 bfa_stats(rp, sm_un_unexp); 116 bfa_stats(rp, sm_un_unexp);
117 bfa_assert(0); 117 bfa_sm_fault(rp->bfa, event);
118 } 118 }
119} 119}
120 120
@@ -146,7 +146,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
146 146
147 default: 147 default:
148 bfa_stats(rp, sm_cr_unexp); 148 bfa_stats(rp, sm_cr_unexp);
149 bfa_assert(0); 149 bfa_sm_fault(rp->bfa, event);
150 } 150 }
151} 151}
152 152
@@ -183,7 +183,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
183 183
184 default: 184 default:
185 bfa_stats(rp, sm_fwc_unexp); 185 bfa_stats(rp, sm_fwc_unexp);
186 bfa_assert(0); 186 bfa_sm_fault(rp->bfa, event);
187 } 187 }
188} 188}
189 189
@@ -224,7 +224,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
224 224
225 default: 225 default:
226 bfa_stats(rp, sm_fwc_unexp); 226 bfa_stats(rp, sm_fwc_unexp);
227 bfa_assert(0); 227 bfa_sm_fault(rp->bfa, event);
228 } 228 }
229} 229}
230 230
@@ -296,7 +296,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
296 296
297 default: 297 default:
298 bfa_stats(rp, sm_on_unexp); 298 bfa_stats(rp, sm_on_unexp);
299 bfa_assert(0); 299 bfa_sm_fault(rp->bfa, event);
300 } 300 }
301} 301}
302 302
@@ -329,7 +329,7 @@ bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
329 329
330 default: 330 default:
331 bfa_stats(rp, sm_fwd_unexp); 331 bfa_stats(rp, sm_fwd_unexp);
332 bfa_assert(0); 332 bfa_sm_fault(rp->bfa, event);
333 } 333 }
334} 334}
335 335
@@ -359,7 +359,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
359 359
360 default: 360 default:
361 bfa_stats(rp, sm_fwd_unexp); 361 bfa_stats(rp, sm_fwd_unexp);
362 bfa_assert(0); 362 bfa_sm_fault(rp->bfa, event);
363 } 363 }
364} 364}
365 365
@@ -394,7 +394,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
394 394
395 default: 395 default:
396 bfa_stats(rp, sm_off_unexp); 396 bfa_stats(rp, sm_off_unexp);
397 bfa_assert(0); 397 bfa_sm_fault(rp->bfa, event);
398 } 398 }
399} 399}
400 400
@@ -421,7 +421,7 @@ bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
421 break; 421 break;
422 422
423 default: 423 default:
424 bfa_assert(0); 424 bfa_sm_fault(rp->bfa, event);
425 } 425 }
426} 426}
427 427
@@ -446,7 +446,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
446 break; 446 break;
447 447
448 default: 448 default:
449 bfa_assert(0); 449 bfa_sm_fault(rp->bfa, event);
450 } 450 }
451} 451}
452 452
@@ -477,7 +477,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
477 477
478 default: 478 default:
479 bfa_stats(rp, sm_delp_unexp); 479 bfa_stats(rp, sm_delp_unexp);
480 bfa_assert(0); 480 bfa_sm_fault(rp->bfa, event);
481 } 481 }
482} 482}
483 483
@@ -512,7 +512,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
512 512
513 default: 513 default:
514 bfa_stats(rp, sm_offp_unexp); 514 bfa_stats(rp, sm_offp_unexp);
515 bfa_assert(0); 515 bfa_sm_fault(rp->bfa, event);
516 } 516 }
517} 517}
518 518
@@ -550,7 +550,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
550 550
551 default: 551 default:
552 bfa_stats(rp, sm_iocd_unexp); 552 bfa_stats(rp, sm_iocd_unexp);
553 bfa_assert(0); 553 bfa_sm_fault(rp->bfa, event);
554 } 554 }
555} 555}
556 556
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
index b3562dce7e9f..a7a82610db85 100644
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ b/drivers/scsi/bfa/bfa_trcmod_priv.h
@@ -29,38 +29,36 @@
29 * !!! needed between trace utility and driver version 29 * !!! needed between trace utility and driver version
30 */ 30 */
31enum { 31enum {
32 BFA_TRC_HAL_IOC = 1, 32 BFA_TRC_HAL_INTR = 1,
33 BFA_TRC_HAL_INTR = 2, 33 BFA_TRC_HAL_FCXP = 2,
34 BFA_TRC_HAL_FCXP = 3, 34 BFA_TRC_HAL_UF = 3,
35 BFA_TRC_HAL_UF = 4, 35 BFA_TRC_HAL_RPORT = 4,
36 BFA_TRC_HAL_DIAG = 5, 36 BFA_TRC_HAL_FCPIM = 5,
37 BFA_TRC_HAL_RPORT = 6, 37 BFA_TRC_HAL_IOIM = 6,
38 BFA_TRC_HAL_FCPIM = 7, 38 BFA_TRC_HAL_TSKIM = 7,
39 BFA_TRC_HAL_IOIM = 8, 39 BFA_TRC_HAL_ITNIM = 8,
40 BFA_TRC_HAL_TSKIM = 9, 40 BFA_TRC_HAL_FCPORT = 9,
41 BFA_TRC_HAL_ITNIM = 10, 41 BFA_TRC_HAL_SGPG = 10,
42 BFA_TRC_HAL_PPORT = 11, 42 BFA_TRC_HAL_FLASH = 11,
43 BFA_TRC_HAL_SGPG = 12, 43 BFA_TRC_HAL_DEBUG = 12,
44 BFA_TRC_HAL_FLASH = 13, 44 BFA_TRC_HAL_WWN = 13,
45 BFA_TRC_HAL_DEBUG = 14, 45 BFA_TRC_HAL_FLASH_RAW = 14,
46 BFA_TRC_HAL_WWN = 15, 46 BFA_TRC_HAL_SBOOT = 15,
47 BFA_TRC_HAL_FLASH_RAW = 16, 47 BFA_TRC_HAL_SBOOT_IO = 16,
48 BFA_TRC_HAL_SBOOT = 17, 48 BFA_TRC_HAL_SBOOT_INTR = 17,
49 BFA_TRC_HAL_SBOOT_IO = 18, 49 BFA_TRC_HAL_SBTEST = 18,
50 BFA_TRC_HAL_SBOOT_INTR = 19, 50 BFA_TRC_HAL_IPFC = 19,
51 BFA_TRC_HAL_SBTEST = 20, 51 BFA_TRC_HAL_IOCFC = 20,
52 BFA_TRC_HAL_IPFC = 21, 52 BFA_TRC_HAL_FCPTM = 21,
53 BFA_TRC_HAL_IOCFC = 22, 53 BFA_TRC_HAL_IOTM = 22,
54 BFA_TRC_HAL_FCPTM = 23, 54 BFA_TRC_HAL_TSKTM = 23,
55 BFA_TRC_HAL_IOTM = 24, 55 BFA_TRC_HAL_TIN = 24,
56 BFA_TRC_HAL_TSKTM = 25, 56 BFA_TRC_HAL_LPS = 25,
57 BFA_TRC_HAL_TIN = 26, 57 BFA_TRC_HAL_FCDIAG = 26,
58 BFA_TRC_HAL_LPS = 27, 58 BFA_TRC_HAL_PBIND = 27,
59 BFA_TRC_HAL_FCDIAG = 28, 59 BFA_TRC_HAL_IOCFC_CT = 28,
60 BFA_TRC_HAL_PBIND = 29, 60 BFA_TRC_HAL_IOCFC_CB = 29,
61 BFA_TRC_HAL_IOCFC_CT = 30, 61 BFA_TRC_HAL_IOCFC_Q = 30,
62 BFA_TRC_HAL_IOCFC_CB = 31,
63 BFA_TRC_HAL_IOCFC_Q = 32,
64}; 62};
65 63
66#endif /* __BFA_TRCMOD_PRIV_H__ */ 64#endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
index ff7a4dc0bf3c..ad9aaaedd3f1 100644
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -110,7 +110,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
110 break; 110 break;
111 111
112 default: 112 default:
113 bfa_assert(0); 113 bfa_sm_fault(tskim->bfa, event);
114 } 114 }
115} 115}
116 116
@@ -146,7 +146,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
146 break; 146 break;
147 147
148 default: 148 default:
149 bfa_assert(0); 149 bfa_sm_fault(tskim->bfa, event);
150 } 150 }
151} 151}
152 152
@@ -178,7 +178,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
178 break; 178 break;
179 179
180 default: 180 default:
181 bfa_assert(0); 181 bfa_sm_fault(tskim->bfa, event);
182 } 182 }
183} 183}
184 184
@@ -207,7 +207,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
207 break; 207 break;
208 208
209 default: 209 default:
210 bfa_assert(0); 210 bfa_sm_fault(tskim->bfa, event);
211 } 211 }
212} 212}
213 213
@@ -242,7 +242,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
242 break; 242 break;
243 243
244 default: 244 default:
245 bfa_assert(0); 245 bfa_sm_fault(tskim->bfa, event);
246 } 246 }
247} 247}
248 248
@@ -277,7 +277,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
277 break; 277 break;
278 278
279 default: 279 default:
280 bfa_assert(0); 280 bfa_sm_fault(tskim->bfa, event);
281 } 281 }
282} 282}
283 283
@@ -303,7 +303,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
303 break; 303 break;
304 304
305 default: 305 default:
306 bfa_assert(0); 306 bfa_sm_fault(tskim->bfa, event);
307 } 307 }
308} 308}
309 309
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index b52b773d49d9..6bff08ea4029 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kthread.h>
23#include "bfad_drv.h" 24#include "bfad_drv.h"
24#include "bfad_im.h" 25#include "bfad_im.h"
25#include "bfad_tm.h" 26#include "bfad_tm.h"
@@ -53,6 +54,7 @@ static int log_level = BFA_LOG_WARNING;
53static int ioc_auto_recover = BFA_TRUE; 54static int ioc_auto_recover = BFA_TRUE;
54static int ipfc_enable = BFA_FALSE; 55static int ipfc_enable = BFA_FALSE;
55static int ipfc_mtu = -1; 56static int ipfc_mtu = -1;
57static int fdmi_enable = BFA_TRUE;
56int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; 58int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
57int bfa_linkup_delay = -1; 59int bfa_linkup_delay = -1;
58 60
@@ -74,6 +76,7 @@ module_param(log_level, int, S_IRUGO | S_IWUSR);
74module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); 76module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
75module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); 77module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
76module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR); 78module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
79module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
77module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); 80module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
78 81
79/* 82/*
@@ -95,6 +98,8 @@ bfad_fc4_probe(struct bfad_s *bfad)
95 98
96 if (ipfc_enable) 99 if (ipfc_enable)
97 bfad_ipfc_probe(bfad); 100 bfad_ipfc_probe(bfad);
101
102 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
98ext: 103ext:
99 return rc; 104 return rc;
100} 105}
@@ -106,6 +111,7 @@ bfad_fc4_probe_undo(struct bfad_s *bfad)
106 bfad_tm_probe_undo(bfad); 111 bfad_tm_probe_undo(bfad);
107 if (ipfc_enable) 112 if (ipfc_enable)
108 bfad_ipfc_probe_undo(bfad); 113 bfad_ipfc_probe_undo(bfad);
114 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
109} 115}
110 116
111static void 117static void
@@ -173,9 +179,19 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
173{ 179{
174 struct bfad_s *bfad = drv; 180 struct bfad_s *bfad = drv;
175 181
176 if (init_status == BFA_STATUS_OK) 182 if (init_status == BFA_STATUS_OK) {
177 bfad->bfad_flags |= BFAD_HAL_INIT_DONE; 183 bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
178 184
185 /* If BFAD_HAL_INIT_FAIL flag is set:
186 * Wake up the kernel thread to start
187 * the bfad operations after HAL init done
188 */
189 if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
190 bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
191 wake_up_process(bfad->bfad_tsk);
192 }
193 }
194
179 complete(&bfad->comp); 195 complete(&bfad->comp);
180} 196}
181 197
@@ -648,7 +664,7 @@ bfad_fcs_port_cfg(struct bfad_s *bfad)
648 664
649 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); 665 sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
650 memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); 666 memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
651 bfa_pport_get_attr(&bfad->bfa, &attr); 667 bfa_fcport_get_attr(&bfad->bfa, &attr);
652 port_cfg.nwwn = attr.nwwn; 668 port_cfg.nwwn = attr.nwwn;
653 port_cfg.pwwn = attr.pwwn; 669 port_cfg.pwwn = attr.pwwn;
654 670
@@ -661,7 +677,6 @@ bfad_drv_init(struct bfad_s *bfad)
661 bfa_status_t rc; 677 bfa_status_t rc;
662 unsigned long flags; 678 unsigned long flags;
663 struct bfa_fcs_driver_info_s driver_info; 679 struct bfa_fcs_driver_info_s driver_info;
664 int i;
665 680
666 bfad->cfg_data.rport_del_timeout = rport_del_timeout; 681 bfad->cfg_data.rport_del_timeout = rport_del_timeout;
667 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; 682 bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -681,12 +696,7 @@ bfad_drv_init(struct bfad_s *bfad)
681 bfa_init_log(&bfad->bfa, bfad->logmod); 696 bfa_init_log(&bfad->bfa, bfad->logmod);
682 bfa_init_trc(&bfad->bfa, bfad->trcmod); 697 bfa_init_trc(&bfad->bfa, bfad->trcmod);
683 bfa_init_aen(&bfad->bfa, bfad->aen); 698 bfa_init_aen(&bfad->bfa, bfad->aen);
684 INIT_LIST_HEAD(&bfad->file_q); 699 memset(bfad->file_map, 0, sizeof(bfad->file_map));
685 INIT_LIST_HEAD(&bfad->file_free_q);
686 for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
687 bfa_q_qe_init(&bfad->file_buf[i].qe);
688 list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
689 }
690 bfa_init_plog(&bfad->bfa, &bfad->plog_buf); 700 bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
691 bfa_plog_init(&bfad->plog_buf); 701 bfa_plog_init(&bfad->plog_buf);
692 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, 702 bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -746,8 +756,16 @@ bfad_drv_init(struct bfad_s *bfad)
746 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); 756 bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
747 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); 757 bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
748 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); 758 bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
749 bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); 759 bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
760
761 /* Do FCS init only when HAL init is done */
762 if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
763 bfa_fcs_init(&bfad->bfa_fcs);
764 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
765 }
766
750 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); 767 bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
768 bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
751 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 769 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
752 770
753 bfad->bfad_flags |= BFAD_DRV_INIT_DONE; 771 bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -763,12 +781,21 @@ out_hal_mem_alloc_failure:
763void 781void
764bfad_drv_uninit(struct bfad_s *bfad) 782bfad_drv_uninit(struct bfad_s *bfad)
765{ 783{
784 unsigned long flags;
785
786 spin_lock_irqsave(&bfad->bfad_lock, flags);
787 init_completion(&bfad->comp);
788 bfa_stop(&bfad->bfa);
789 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
790 wait_for_completion(&bfad->comp);
791
766 del_timer_sync(&bfad->hal_tmo); 792 del_timer_sync(&bfad->hal_tmo);
767 bfa_isr_disable(&bfad->bfa); 793 bfa_isr_disable(&bfad->bfa);
768 bfa_detach(&bfad->bfa); 794 bfa_detach(&bfad->bfa);
769 bfad_remove_intr(bfad); 795 bfad_remove_intr(bfad);
770 bfa_assert(list_empty(&bfad->file_q));
771 bfad_hal_mem_release(bfad); 796 bfad_hal_mem_release(bfad);
797
798 bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
772} 799}
773 800
774void 801void
@@ -859,6 +886,86 @@ bfad_drv_log_level_set(struct bfad_s *bfad)
859 bfa_log_set_level_all(&bfad->log_data, log_level); 886 bfa_log_set_level_all(&bfad->log_data, log_level);
860} 887}
861 888
889bfa_status_t
890bfad_start_ops(struct bfad_s *bfad)
891{
892 int retval;
893
894 /* PPORT FCS config */
895 bfad_fcs_port_cfg(bfad);
896
897 retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
898 if (retval != BFA_STATUS_OK)
899 goto out_cfg_pport_failure;
900
901 /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
902 retval = bfad_fc4_probe(bfad);
903 if (retval != BFA_STATUS_OK) {
904 printk(KERN_WARNING "bfad_fc4_probe failed\n");
905 goto out_fc4_probe_failure;
906 }
907
908 bfad_drv_start(bfad);
909
910 /*
911 * If bfa_linkup_delay is set to -1 default; try to retrive the
912 * value using the bfad_os_get_linkup_delay(); else use the
913 * passed in module param value as the bfa_linkup_delay.
914 */
915 if (bfa_linkup_delay < 0) {
916
917 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
918 bfad_os_rport_online_wait(bfad);
919 bfa_linkup_delay = -1;
920
921 } else {
922 bfad_os_rport_online_wait(bfad);
923 }
924
925 bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
926
927 return BFA_STATUS_OK;
928
929out_fc4_probe_failure:
930 bfad_fc4_probe_undo(bfad);
931 bfad_uncfg_pport(bfad);
932out_cfg_pport_failure:
933 return BFA_STATUS_FAILED;
934}
935
936int
937bfad_worker (void *ptr)
938{
939 struct bfad_s *bfad;
940 unsigned long flags;
941
942 bfad = (struct bfad_s *)ptr;
943
944 while (!kthread_should_stop()) {
945
946 /* Check if the FCS init is done from bfad_drv_init;
947 * if not done do FCS init and set the flag.
948 */
949 if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
950 spin_lock_irqsave(&bfad->bfad_lock, flags);
951 bfa_fcs_init(&bfad->bfa_fcs);
952 bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
953 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
954 }
955
956 /* Start the bfad operations after HAL init done */
957 bfad_start_ops(bfad);
958
959 spin_lock_irqsave(&bfad->bfad_lock, flags);
960 bfad->bfad_tsk = NULL;
961 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
962
963 break;
964 }
965
966 return 0;
967}
968
862 /* 969 /*
863 * PCI_entry PCI driver entries * { 970 * PCI_entry PCI driver entries * {
864 */ 971 */
@@ -871,7 +978,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
871{ 978{
872 struct bfad_s *bfad; 979 struct bfad_s *bfad;
873 int error = -ENODEV, retval; 980 int error = -ENODEV, retval;
874 char buf[16];
875 981
876 /* 982 /*
877 * For single port cards - only claim function 0 983 * For single port cards - only claim function 0
@@ -902,8 +1008,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
902 bfa_trc(bfad, bfad_inst); 1008 bfa_trc(bfad, bfad_inst);
903 1009
904 bfad->logmod = &bfad->log_data; 1010 bfad->logmod = &bfad->log_data;
905 sprintf(buf, "%d", bfad_inst); 1011 bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
906 bfa_log_init(bfad->logmod, buf, bfa_os_printf);
907 1012
908 bfad_drv_log_level_set(bfad); 1013 bfad_drv_log_level_set(bfad);
909 1014
@@ -933,57 +1038,39 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
933 bfad->ref_count = 0; 1038 bfad->ref_count = 0;
934 bfad->pport.bfad = bfad; 1039 bfad->pport.bfad = bfad;
935 1040
1041 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
1042 "bfad_worker");
1043 if (IS_ERR(bfad->bfad_tsk)) {
1044 printk(KERN_INFO "bfad[%d]: Kernel thread"
1045 " creation failed!\n",
1046 bfad->inst_no);
1047 goto out_kthread_create_failure;
1048 }
1049
936 retval = bfad_drv_init(bfad); 1050 retval = bfad_drv_init(bfad);
937 if (retval != BFA_STATUS_OK) 1051 if (retval != BFA_STATUS_OK)
938 goto out_drv_init_failure; 1052 goto out_drv_init_failure;
939 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 1053 if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1054 bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
940 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no); 1055 printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
941 goto ok; 1056 goto ok;
942 } 1057 }
943 1058
944 /* 1059 retval = bfad_start_ops(bfad);
945 * PPORT FCS config
946 */
947 bfad_fcs_port_cfg(bfad);
948
949 retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
950 if (retval != BFA_STATUS_OK) 1060 if (retval != BFA_STATUS_OK)
951 goto out_cfg_pport_failure; 1061 goto out_start_ops_failure;
952
953 /*
954 * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
955 */
956 retval = bfad_fc4_probe(bfad);
957 if (retval != BFA_STATUS_OK) {
958 printk(KERN_WARNING "bfad_fc4_probe failed\n");
959 goto out_fc4_probe_failure;
960 }
961 1062
962 bfad_drv_start(bfad); 1063 kthread_stop(bfad->bfad_tsk);
963 1064 bfad->bfad_tsk = NULL;
964 /*
965 * If bfa_linkup_delay is set to -1 default; try to retrive the
966 * value using the bfad_os_get_linkup_delay(); else use the
967 * passed in module param value as the bfa_linkup_delay.
968 */
969 if (bfa_linkup_delay < 0) {
970 bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
971 bfad_os_rport_online_wait(bfad);
972 bfa_linkup_delay = -1;
973 } else {
974 bfad_os_rport_online_wait(bfad);
975 }
976 1065
977 bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
978ok: 1066ok:
979 return 0; 1067 return 0;
980 1068
981out_fc4_probe_failure: 1069out_start_ops_failure:
982 bfad_fc4_probe_undo(bfad);
983 bfad_uncfg_pport(bfad);
984out_cfg_pport_failure:
985 bfad_drv_uninit(bfad); 1070 bfad_drv_uninit(bfad);
986out_drv_init_failure: 1071out_drv_init_failure:
1072 kthread_stop(bfad->bfad_tsk);
1073out_kthread_create_failure:
987 mutex_lock(&bfad_mutex); 1074 mutex_lock(&bfad_mutex);
988 bfad_inst--; 1075 bfad_inst--;
989 list_del(&bfad->list_entry); 1076 list_del(&bfad->list_entry);
@@ -1008,6 +1095,11 @@ bfad_pci_remove(struct pci_dev *pdev)
1008 1095
1009 bfa_trc(bfad, bfad->inst_no); 1096 bfa_trc(bfad, bfad->inst_no);
1010 1097
1098 spin_lock_irqsave(&bfad->bfad_lock, flags);
1099 if (bfad->bfad_tsk != NULL)
1100 kthread_stop(bfad->bfad_tsk);
1101 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1102
1011 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE) 1103 if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
1012 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { 1104 && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
1013 1105
@@ -1024,13 +1116,25 @@ bfad_pci_remove(struct pci_dev *pdev)
1024 goto remove_sysfs; 1116 goto remove_sysfs;
1025 } 1117 }
1026 1118
1027 if (bfad->bfad_flags & BFAD_HAL_START_DONE) 1119 if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
1028 bfad_drv_stop(bfad); 1120 bfad_drv_stop(bfad);
1121 } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
1122 /* Invoking bfa_stop() before bfa_detach
1123 * when HAL and DRV init are success
1124 * but HAL start did not occur.
1125 */
1126 spin_lock_irqsave(&bfad->bfad_lock, flags);
1127 init_completion(&bfad->comp);
1128 bfa_stop(&bfad->bfa);
1129 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1130 wait_for_completion(&bfad->comp);
1131 }
1029 1132
1030 bfad_remove_intr(bfad); 1133 bfad_remove_intr(bfad);
1031
1032 del_timer_sync(&bfad->hal_tmo); 1134 del_timer_sync(&bfad->hal_tmo);
1033 bfad_fc4_probe_undo(bfad); 1135
1136 if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE)
1137 bfad_fc4_probe_undo(bfad);
1034 1138
1035 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) 1139 if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
1036 bfad_uncfg_pport(bfad); 1140 bfad_uncfg_pport(bfad);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9129ae3040ff..d97f69191838 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -141,7 +141,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
141 struct bfad_s *bfad = im_port->bfad; 141 struct bfad_s *bfad = im_port->bfad;
142 struct bfa_pport_attr_s attr; 142 struct bfa_pport_attr_s attr;
143 143
144 bfa_pport_get_attr(&bfad->bfa, &attr); 144 bfa_fcport_get_attr(&bfad->bfa, &attr);
145 145
146 switch (attr.port_type) { 146 switch (attr.port_type) {
147 case BFA_PPORT_TYPE_NPORT: 147 case BFA_PPORT_TYPE_NPORT:
@@ -173,7 +173,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
173 struct bfad_s *bfad = im_port->bfad; 173 struct bfad_s *bfad = im_port->bfad;
174 struct bfa_pport_attr_s attr; 174 struct bfa_pport_attr_s attr;
175 175
176 bfa_pport_get_attr(&bfad->bfa, &attr); 176 bfa_fcport_get_attr(&bfad->bfa, &attr);
177 177
178 switch (attr.port_state) { 178 switch (attr.port_state) {
179 case BFA_PPORT_ST_LINKDOWN: 179 case BFA_PPORT_ST_LINKDOWN:
@@ -229,8 +229,10 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
229 (struct bfad_im_port_s *) shost->hostdata[0]; 229 (struct bfad_im_port_s *) shost->hostdata[0];
230 struct bfad_s *bfad = im_port->bfad; 230 struct bfad_s *bfad = im_port->bfad;
231 struct bfa_pport_attr_s attr; 231 struct bfa_pport_attr_s attr;
232 unsigned long flags;
232 233
233 bfa_pport_get_attr(&bfad->bfa, &attr); 234 spin_lock_irqsave(shost->host_lock, flags);
235 bfa_fcport_get_attr(&bfad->bfa, &attr);
234 switch (attr.speed) { 236 switch (attr.speed) {
235 case BFA_PPORT_SPEED_8GBPS: 237 case BFA_PPORT_SPEED_8GBPS:
236 fc_host_speed(shost) = FC_PORTSPEED_8GBIT; 238 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
@@ -248,6 +250,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
248 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 250 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
249 break; 251 break;
250 } 252 }
253 spin_unlock_irqrestore(shost->host_lock, flags);
251} 254}
252 255
253/** 256/**
@@ -285,7 +288,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
285 init_completion(&fcomp.comp); 288 init_completion(&fcomp.comp);
286 spin_lock_irqsave(&bfad->bfad_lock, flags); 289 spin_lock_irqsave(&bfad->bfad_lock, flags);
287 memset(hstats, 0, sizeof(struct fc_host_statistics)); 290 memset(hstats, 0, sizeof(struct fc_host_statistics));
288 rc = bfa_pport_get_stats(&bfad->bfa, 291 rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
289 (union bfa_pport_stats_u *) hstats, 292 (union bfa_pport_stats_u *) hstats,
290 bfad_hcb_comp, &fcomp); 293 bfad_hcb_comp, &fcomp);
291 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 294 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -312,7 +315,8 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
312 315
313 init_completion(&fcomp.comp); 316 init_completion(&fcomp.comp);
314 spin_lock_irqsave(&bfad->bfad_lock, flags); 317 spin_lock_irqsave(&bfad->bfad_lock, flags);
315 rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp); 318 rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
319 &fcomp);
316 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 320 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
317 321
318 if (rc != BFA_STATUS_OK) 322 if (rc != BFA_STATUS_OK)
@@ -421,12 +425,10 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
421 struct bfad_im_port_s *im_port = 425 struct bfad_im_port_s *im_port =
422 (struct bfad_im_port_s *) shost->hostdata[0]; 426 (struct bfad_im_port_s *) shost->hostdata[0];
423 struct bfad_s *bfad = im_port->bfad; 427 struct bfad_s *bfad = im_port->bfad;
424 struct bfa_ioc_attr_s ioc_attr; 428 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
425 429
426 memset(&ioc_attr, 0, sizeof(ioc_attr)); 430 bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
427 bfa_get_attr(&bfad->bfa, &ioc_attr); 431 return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
428 return snprintf(buf, PAGE_SIZE, "%s\n",
429 ioc_attr.adapter_attr.serial_num);
430} 432}
431 433
432static ssize_t 434static ssize_t
@@ -437,11 +439,10 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
437 struct bfad_im_port_s *im_port = 439 struct bfad_im_port_s *im_port =
438 (struct bfad_im_port_s *) shost->hostdata[0]; 440 (struct bfad_im_port_s *) shost->hostdata[0];
439 struct bfad_s *bfad = im_port->bfad; 441 struct bfad_s *bfad = im_port->bfad;
440 struct bfa_ioc_attr_s ioc_attr; 442 char model[BFA_ADAPTER_MODEL_NAME_LEN];
441 443
442 memset(&ioc_attr, 0, sizeof(ioc_attr)); 444 bfa_get_adapter_model(&bfad->bfa, model);
443 bfa_get_attr(&bfad->bfa, &ioc_attr); 445 return snprintf(buf, PAGE_SIZE, "%s\n", model);
444 return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);
445} 446}
446 447
447static ssize_t 448static ssize_t
@@ -452,12 +453,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
452 struct bfad_im_port_s *im_port = 453 struct bfad_im_port_s *im_port =
453 (struct bfad_im_port_s *) shost->hostdata[0]; 454 (struct bfad_im_port_s *) shost->hostdata[0];
454 struct bfad_s *bfad = im_port->bfad; 455 struct bfad_s *bfad = im_port->bfad;
455 struct bfa_ioc_attr_s ioc_attr; 456 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
456 457
457 memset(&ioc_attr, 0, sizeof(ioc_attr)); 458 bfa_get_adapter_model(&bfad->bfa, model_descr);
458 bfa_get_attr(&bfad->bfa, &ioc_attr); 459 return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
459 return snprintf(buf, PAGE_SIZE, "%s\n",
460 ioc_attr.adapter_attr.model_descr);
461} 460}
462 461
463static ssize_t 462static ssize_t
@@ -482,14 +481,13 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
482 struct bfad_im_port_s *im_port = 481 struct bfad_im_port_s *im_port =
483 (struct bfad_im_port_s *) shost->hostdata[0]; 482 (struct bfad_im_port_s *) shost->hostdata[0];
484 struct bfad_s *bfad = im_port->bfad; 483 struct bfad_s *bfad = im_port->bfad;
485 struct bfa_ioc_attr_s ioc_attr; 484 char model[BFA_ADAPTER_MODEL_NAME_LEN];
486 485 char fw_ver[BFA_VERSION_LEN];
487 memset(&ioc_attr, 0, sizeof(ioc_attr));
488 bfa_get_attr(&bfad->bfa, &ioc_attr);
489 486
487 bfa_get_adapter_model(&bfad->bfa, model);
488 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
490 return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", 489 return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
491 ioc_attr.adapter_attr.model, 490 model, fw_ver, BFAD_DRIVER_VERSION);
492 ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
493} 491}
494 492
495static ssize_t 493static ssize_t
@@ -500,11 +498,10 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
500 struct bfad_im_port_s *im_port = 498 struct bfad_im_port_s *im_port =
501 (struct bfad_im_port_s *) shost->hostdata[0]; 499 (struct bfad_im_port_s *) shost->hostdata[0];
502 struct bfad_s *bfad = im_port->bfad; 500 struct bfad_s *bfad = im_port->bfad;
503 struct bfa_ioc_attr_s ioc_attr; 501 char hw_ver[BFA_VERSION_LEN];
504 502
505 memset(&ioc_attr, 0, sizeof(ioc_attr)); 503 bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
506 bfa_get_attr(&bfad->bfa, &ioc_attr); 504 return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
507 return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver);
508} 505}
509 506
510static ssize_t 507static ssize_t
@@ -522,12 +519,10 @@ bfad_im_optionrom_version_show(struct device *dev,
522 struct bfad_im_port_s *im_port = 519 struct bfad_im_port_s *im_port =
523 (struct bfad_im_port_s *) shost->hostdata[0]; 520 (struct bfad_im_port_s *) shost->hostdata[0];
524 struct bfad_s *bfad = im_port->bfad; 521 struct bfad_s *bfad = im_port->bfad;
525 struct bfa_ioc_attr_s ioc_attr; 522 char optrom_ver[BFA_VERSION_LEN];
526 523
527 memset(&ioc_attr, 0, sizeof(ioc_attr)); 524 bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
528 bfa_get_attr(&bfad->bfa, &ioc_attr); 525 return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
529 return snprintf(buf, PAGE_SIZE, "%s\n",
530 ioc_attr.adapter_attr.optrom_ver);
531} 526}
532 527
533static ssize_t 528static ssize_t
@@ -538,11 +533,10 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
538 struct bfad_im_port_s *im_port = 533 struct bfad_im_port_s *im_port =
539 (struct bfad_im_port_s *) shost->hostdata[0]; 534 (struct bfad_im_port_s *) shost->hostdata[0];
540 struct bfad_s *bfad = im_port->bfad; 535 struct bfad_s *bfad = im_port->bfad;
541 struct bfa_ioc_attr_s ioc_attr; 536 char fw_ver[BFA_VERSION_LEN];
542 537
543 memset(&ioc_attr, 0, sizeof(ioc_attr)); 538 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
544 bfa_get_attr(&bfad->bfa, &ioc_attr); 539 return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
545 return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver);
546} 540}
547 541
548static ssize_t 542static ssize_t
@@ -553,11 +547,9 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
553 struct bfad_im_port_s *im_port = 547 struct bfad_im_port_s *im_port =
554 (struct bfad_im_port_s *) shost->hostdata[0]; 548 (struct bfad_im_port_s *) shost->hostdata[0];
555 struct bfad_s *bfad = im_port->bfad; 549 struct bfad_s *bfad = im_port->bfad;
556 struct bfa_ioc_attr_s ioc_attr;
557 550
558 memset(&ioc_attr, 0, sizeof(ioc_attr)); 551 return snprintf(buf, PAGE_SIZE, "%d\n",
559 bfa_get_attr(&bfad->bfa, &ioc_attr); 552 bfa_get_nports(&bfad->bfa));
560 return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports);
561} 553}
562 554
563static ssize_t 555static ssize_t
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
index 4d3312da6a81..bf0102076508 100644
--- a/drivers/scsi/bfa/bfad_attr.h
+++ b/drivers/scsi/bfa/bfad_attr.h
@@ -17,9 +17,6 @@
17 17
18#ifndef __BFAD_ATTR_H__ 18#ifndef __BFAD_ATTR_H__
19#define __BFAD_ATTR_H__ 19#define __BFAD_ATTR_H__
20/**
21 * bfad_attr.h VMware driver configuration interface module.
22 */
23 20
24/** 21/**
25 * FC_transport_template FC transport template 22 * FC_transport_template FC transport template
@@ -52,12 +49,6 @@ bfad_im_get_starget_port_name(struct scsi_target *starget);
52void 49void
53bfad_im_get_host_port_id(struct Scsi_Host *shost); 50bfad_im_get_host_port_id(struct Scsi_Host *shost);
54 51
55/**
56 * FC transport template entry, issue a LIP.
57 */
58int
59bfad_im_issue_fc_host_lip(struct Scsi_Host *shost);
60
61struct Scsi_Host* 52struct Scsi_Host*
62bfad_os_starget_to_shost(struct scsi_target *starget); 53bfad_os_starget_to_shost(struct scsi_target *starget);
63 54
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 172c81e25c1c..107848cd3b6d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -46,7 +46,7 @@
46#ifdef BFA_DRIVER_VERSION 46#ifdef BFA_DRIVER_VERSION
47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 47#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
48#else 48#else
49#define BFAD_DRIVER_VERSION "2.0.0.0" 49#define BFAD_DRIVER_VERSION "2.1.2.1"
50#endif 50#endif
51 51
52 52
@@ -62,7 +62,9 @@
62#define BFAD_HAL_START_DONE 0x00000010 62#define BFAD_HAL_START_DONE 0x00000010
63#define BFAD_PORT_ONLINE 0x00000020 63#define BFAD_PORT_ONLINE 0x00000020
64#define BFAD_RPORT_ONLINE 0x00000040 64#define BFAD_RPORT_ONLINE 0x00000040
65 65#define BFAD_FCS_INIT_DONE 0x00000080
66#define BFAD_HAL_INIT_FAIL 0x00000100
67#define BFAD_FC4_PROBE_DONE 0x00000200
66#define BFAD_PORT_DELETE 0x00000001 68#define BFAD_PORT_DELETE 0x00000001
67 69
68/* 70/*
@@ -137,12 +139,16 @@ struct bfad_cfg_param_s {
137 u32 binding_method; 139 u32 binding_method;
138}; 140};
139 141
140#define BFAD_AEN_MAX_APPS 8 142union bfad_tmp_buf {
141struct bfad_aen_file_s { 143 /* From struct bfa_adapter_attr_s */
142 struct list_head qe; 144 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
143 struct bfad_s *bfad; 145 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
144 s32 ri; 146 char model[BFA_ADAPTER_MODEL_NAME_LEN];
145 s32 app_id; 147 char fw_ver[BFA_VERSION_LEN];
148 char optrom_ver[BFA_VERSION_LEN];
149
150 /* From struct bfa_ioc_pci_attr_s */
151 u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
146}; 152};
147 153
148/* 154/*
@@ -168,6 +174,7 @@ struct bfad_s {
168 u32 inst_no; /* BFAD instance number */ 174 u32 inst_no; /* BFAD instance number */
169 u32 bfad_flags; 175 u32 bfad_flags;
170 spinlock_t bfad_lock; 176 spinlock_t bfad_lock;
177 struct task_struct *bfad_tsk;
171 struct bfad_cfg_param_s cfg_data; 178 struct bfad_cfg_param_s cfg_data;
172 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; 179 struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
173 int nvec; 180 int nvec;
@@ -183,18 +190,12 @@ struct bfad_s {
183 struct bfa_log_mod_s *logmod; 190 struct bfa_log_mod_s *logmod;
184 struct bfa_aen_s *aen; 191 struct bfa_aen_s *aen;
185 struct bfa_aen_s aen_buf; 192 struct bfa_aen_s aen_buf;
186 struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS]; 193 void *file_map[BFA_AEN_MAX_APP];
187 struct list_head file_q;
188 struct list_head file_free_q;
189 struct bfa_plog_s plog_buf; 194 struct bfa_plog_s plog_buf;
190 int ref_count; 195 int ref_count;
191 bfa_boolean_t ipfc_enabled; 196 bfa_boolean_t ipfc_enabled;
197 union bfad_tmp_buf tmp_buf;
192 struct fc_host_statistics link_stats; 198 struct fc_host_statistics link_stats;
193
194 struct kobject *bfa_kobj;
195 struct kobject *ioc_kobj;
196 struct kobject *pport_kobj;
197 struct kobject *lport_kobj;
198}; 199};
199 200
200/* 201/*
@@ -258,6 +259,7 @@ bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
258 struct bfa_port_cfg_s *port_cfg); 259 struct bfa_port_cfg_s *port_cfg);
259bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); 260bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
260bfa_status_t bfad_drv_init(struct bfad_s *bfad); 261bfa_status_t bfad_drv_init(struct bfad_s *bfad);
262bfa_status_t bfad_start_ops(struct bfad_s *bfad);
261void bfad_drv_start(struct bfad_s *bfad); 263void bfad_drv_start(struct bfad_s *bfad);
262void bfad_uncfg_pport(struct bfad_s *bfad); 264void bfad_uncfg_pport(struct bfad_s *bfad);
263void bfad_drv_stop(struct bfad_s *bfad); 265void bfad_drv_stop(struct bfad_s *bfad);
@@ -279,6 +281,7 @@ void bfad_drv_uninit(struct bfad_s *bfad);
279void bfad_drv_log_level_set(struct bfad_s *bfad); 281void bfad_drv_log_level_set(struct bfad_s *bfad);
280bfa_status_t bfad_fc4_module_init(void); 282bfa_status_t bfad_fc4_module_init(void);
281void bfad_fc4_module_exit(void); 283void bfad_fc4_module_exit(void);
284int bfad_worker (void *ptr);
282 285
283void bfad_pci_remove(struct pci_dev *pdev); 286void bfad_pci_remove(struct pci_dev *pdev);
284int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); 287int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f788c2a0ab07..f9fc67a25bf2 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -43,11 +43,11 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
43 struct bfad_s *bfad = drv; 43 struct bfad_s *bfad = drv;
44 struct bfad_itnim_data_s *itnim_data; 44 struct bfad_itnim_data_s *itnim_data;
45 struct bfad_itnim_s *itnim; 45 struct bfad_itnim_s *itnim;
46 u8 host_status = DID_OK;
46 47
47 switch (io_status) { 48 switch (io_status) {
48 case BFI_IOIM_STS_OK: 49 case BFI_IOIM_STS_OK:
49 bfa_trc(bfad, scsi_status); 50 bfa_trc(bfad, scsi_status);
50 cmnd->result = ScsiResult(DID_OK, scsi_status);
51 scsi_set_resid(cmnd, 0); 51 scsi_set_resid(cmnd, 0);
52 52
53 if (sns_len > 0) { 53 if (sns_len > 0) {
@@ -56,8 +56,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
56 sns_len = SCSI_SENSE_BUFFERSIZE; 56 sns_len = SCSI_SENSE_BUFFERSIZE;
57 memcpy(cmnd->sense_buffer, sns_info, sns_len); 57 memcpy(cmnd->sense_buffer, sns_info, sns_len);
58 } 58 }
59 if (residue > 0) 59 if (residue > 0) {
60 bfa_trc(bfad, residue);
60 scsi_set_resid(cmnd, residue); 61 scsi_set_resid(cmnd, residue);
62 if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
63 (scsi_bufflen(cmnd) - residue) <
64 cmnd->underflow) {
65 bfa_trc(bfad, 0);
66 host_status = DID_ERROR;
67 }
68 }
69 cmnd->result = ScsiResult(host_status, scsi_status);
70
61 break; 71 break;
62 72
63 case BFI_IOIM_STS_ABORTED: 73 case BFI_IOIM_STS_ABORTED:
@@ -167,17 +177,15 @@ bfad_im_info(struct Scsi_Host *shost)
167 static char bfa_buf[256]; 177 static char bfa_buf[256];
168 struct bfad_im_port_s *im_port = 178 struct bfad_im_port_s *im_port =
169 (struct bfad_im_port_s *) shost->hostdata[0]; 179 (struct bfad_im_port_s *) shost->hostdata[0];
170 struct bfa_ioc_attr_s ioc_attr;
171 struct bfad_s *bfad = im_port->bfad; 180 struct bfad_s *bfad = im_port->bfad;
181 char model[BFA_ADAPTER_MODEL_NAME_LEN];
172 182
173 memset(&ioc_attr, 0, sizeof(ioc_attr)); 183 bfa_get_adapter_model(&bfad->bfa, model);
174 bfa_get_attr(&bfad->bfa, &ioc_attr);
175 184
176 memset(bfa_buf, 0, sizeof(bfa_buf)); 185 memset(bfa_buf, 0, sizeof(bfa_buf));
177 snprintf(bfa_buf, sizeof(bfa_buf), 186 snprintf(bfa_buf, sizeof(bfa_buf),
178 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", 187 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
179 ioc_attr.adapter_attr.model, bfad->pci_name, 188 model, bfad->pci_name, BFAD_DRIVER_VERSION);
180 BFAD_DRIVER_VERSION);
181 return bfa_buf; 189 return bfa_buf;
182} 190}
183 191
@@ -501,16 +509,6 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
501} 509}
502 510
503/** 511/**
504 * Path TOV processing begin notification -- dummy for linux
505 */
506void
507bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
508{
509}
510
511
512
513/**
514 * Allocate a Scsi_Host for a port. 512 * Allocate a Scsi_Host for a port.
515 */ 513 */
516int 514int
@@ -931,10 +929,9 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
931 struct Scsi_Host *host = im_port->shost; 929 struct Scsi_Host *host = im_port->shost;
932 struct bfad_s *bfad = im_port->bfad; 930 struct bfad_s *bfad = im_port->bfad;
933 struct bfad_port_s *port = im_port->port; 931 struct bfad_port_s *port = im_port->port;
934 union attr { 932 struct bfa_pport_attr_s pattr;
935 struct bfa_pport_attr_s pattr; 933 char model[BFA_ADAPTER_MODEL_NAME_LEN];
936 struct bfa_ioc_attr_s ioc_attr; 934 char fw_ver[BFA_VERSION_LEN];
937 } attr;
938 935
939 fc_host_node_name(host) = 936 fc_host_node_name(host) =
940 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); 937 bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
@@ -954,20 +951,18 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
954 /* For fibre channel services type 0x20 */ 951 /* For fibre channel services type 0x20 */
955 fc_host_supported_fc4s(host)[7] = 1; 952 fc_host_supported_fc4s(host)[7] = 1;
956 953
957 memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr)); 954 bfa_get_adapter_model(&bfad->bfa, model);
958 bfa_get_attr(&bfad->bfa, &attr.ioc_attr); 955 bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
959 sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", 956 sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
960 attr.ioc_attr.adapter_attr.model, 957 model, fw_ver, BFAD_DRIVER_VERSION);
961 attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
962 958
963 fc_host_supported_speeds(host) = 0; 959 fc_host_supported_speeds(host) = 0;
964 fc_host_supported_speeds(host) |= 960 fc_host_supported_speeds(host) |=
965 FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | 961 FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
966 FC_PORTSPEED_1GBIT; 962 FC_PORTSPEED_1GBIT;
967 963
968 memset(&attr.pattr, 0, sizeof(attr.pattr)); 964 bfa_fcport_get_attr(&bfad->bfa, &pattr);
969 bfa_pport_get_attr(&bfad->bfa, &attr.pattr); 965 fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
970 fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize;
971} 966}
972 967
973static void 968static void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 189a5b29e21a..85ab2da21321 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -23,7 +23,6 @@
23 23
24#define FCPI_NAME " fcpim" 24#define FCPI_NAME " fcpim"
25 25
26void bfad_flags_set(struct bfad_s *bfad, u32 flags);
27bfa_status_t bfad_im_module_init(void); 26bfa_status_t bfad_im_module_init(void);
28void bfad_im_module_exit(void); 27void bfad_im_module_exit(void);
29bfa_status_t bfad_im_probe(struct bfad_s *bfad); 28bfa_status_t bfad_im_probe(struct bfad_s *bfad);
@@ -126,7 +125,6 @@ bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
126void bfad_os_destroy_workq(struct bfad_im_s *im); 125void bfad_os_destroy_workq(struct bfad_im_s *im);
127void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv); 126void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
128void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); 127void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
129void bfad_os_init_work(struct bfad_im_port_s *im_port);
130void bfad_os_scsi_host_free(struct bfad_s *bfad, 128void bfad_os_scsi_host_free(struct bfad_s *bfad,
131 struct bfad_im_port_s *im_port); 129 struct bfad_im_port_s *im_port);
132void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, 130void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
@@ -136,9 +134,6 @@ struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
136int bfad_os_scsi_add_host(struct Scsi_Host *shost, 134int bfad_os_scsi_add_host(struct Scsi_Host *shost,
137 struct bfad_im_port_s *im_port, struct bfad_s *bfad); 135 struct bfad_im_port_s *im_port, struct bfad_s *bfad);
138 136
139/*
140 * scsi_host_template entries
141 */
142void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port, 137void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
143 struct bfad_itnim_s *itnim); 138 struct bfad_itnim_s *itnim);
144 139
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index 7de8832f6fee..2b7dbecbebca 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -23,8 +23,10 @@ BFA_TRC_FILE(LDRV, INTR);
23/** 23/**
24 * bfa_isr BFA driver interrupt functions 24 * bfa_isr BFA driver interrupt functions
25 */ 25 */
26static int msix_disable; 26static int msix_disable_cb;
27module_param(msix_disable, int, S_IRUGO | S_IWUSR); 27static int msix_disable_ct;
28module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
29module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
28/** 30/**
29 * Line based interrupt handler. 31 * Line based interrupt handler.
30 */ 32 */
@@ -141,6 +143,7 @@ bfad_setup_intr(struct bfad_s *bfad)
141 int error = 0; 143 int error = 0;
142 u32 mask = 0, i, num_bit = 0, max_bit = 0; 144 u32 mask = 0, i, num_bit = 0, max_bit = 0;
143 struct msix_entry msix_entries[MAX_MSIX_ENTRY]; 145 struct msix_entry msix_entries[MAX_MSIX_ENTRY];
146 struct pci_dev *pdev = bfad->pcidev;
144 147
145 /* Call BFA to get the msix map for this PCI function. */ 148 /* Call BFA to get the msix map for this PCI function. */
146 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); 149 bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -148,7 +151,9 @@ bfad_setup_intr(struct bfad_s *bfad)
148 /* Set up the msix entry table */ 151 /* Set up the msix entry table */
149 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); 152 bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
150 153
151 if (!msix_disable) { 154 if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) ||
155 (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) {
156
152 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); 157 error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
153 if (error) { 158 if (error) {
154 /* 159 /*
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index a4b5dd449573..8166e9745ec0 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -37,7 +37,7 @@ BFA_TRC_FILE(FCS, FABRIC);
37#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ 37#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
38 38
39#define bfa_fcs_fabric_set_opertype(__fabric) do { \ 39#define bfa_fcs_fabric_set_opertype(__fabric) do { \
40 if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ 40 if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
41 == BFA_PPORT_TOPOLOGY_P2P) \ 41 == BFA_PPORT_TOPOLOGY_P2P) \
42 (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ 42 (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \
43 else \ 43 else \
@@ -136,8 +136,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
136 case BFA_FCS_FABRIC_SM_CREATE: 136 case BFA_FCS_FABRIC_SM_CREATE:
137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); 137 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
138 bfa_fcs_fabric_init(fabric); 138 bfa_fcs_fabric_init(fabric);
139 bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, 139 bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
140 &fabric->bport.port_cfg, NULL);
141 break; 140 break;
142 141
143 case BFA_FCS_FABRIC_SM_LINK_UP: 142 case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -161,7 +160,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
161 160
162 switch (event) { 161 switch (event) {
163 case BFA_FCS_FABRIC_SM_START: 162 case BFA_FCS_FABRIC_SM_START:
164 if (bfa_pport_is_linkup(fabric->fcs->bfa)) { 163 if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
165 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); 164 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
166 bfa_fcs_fabric_login(fabric); 165 bfa_fcs_fabric_login(fabric);
167 } else 166 } else
@@ -225,7 +224,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
225 switch (event) { 224 switch (event) {
226 case BFA_FCS_FABRIC_SM_CONT_OP: 225 case BFA_FCS_FABRIC_SM_CONT_OP:
227 226
228 bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 227 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
229 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; 228 fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
230 229
231 if (fabric->auth_reqd && fabric->is_auth) { 230 if (fabric->auth_reqd && fabric->is_auth) {
@@ -252,7 +251,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
252 251
253 case BFA_FCS_FABRIC_SM_NO_FABRIC: 252 case BFA_FCS_FABRIC_SM_NO_FABRIC:
254 fabric->fab_type = BFA_FCS_FABRIC_N2N; 253 fabric->fab_type = BFA_FCS_FABRIC_N2N;
255 bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 254 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
256 bfa_fcs_fabric_notify_online(fabric); 255 bfa_fcs_fabric_notify_online(fabric);
257 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); 256 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
258 break; 257 break;
@@ -419,7 +418,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
419 418
420 case BFA_FCS_FABRIC_SM_NO_FABRIC: 419 case BFA_FCS_FABRIC_SM_NO_FABRIC:
421 bfa_trc(fabric->fcs, fabric->bb_credit); 420 bfa_trc(fabric->fcs, fabric->bb_credit);
422 bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); 421 bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
423 break; 422 break;
424 423
425 default: 424 default:
@@ -563,17 +562,15 @@ void
563bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) 562bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
564{ 563{
565 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg; 564 struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
566 struct bfa_adapter_attr_s adapter_attr; 565 char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
567 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; 566 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
568 567
569 bfa_os_memset((void *)&adapter_attr, 0, 568 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
570 sizeof(struct bfa_adapter_attr_s));
571 bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr);
572 569
573 /* 570 /*
574 * Model name/number 571 * Model name/number
575 */ 572 */
576 strncpy((char *)&port_cfg->sym_name, adapter_attr.model, 573 strncpy((char *)&port_cfg->sym_name, model,
577 BFA_FCS_PORT_SYMBNAME_MODEL_SZ); 574 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
578 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, 575 strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
579 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); 576 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
@@ -719,10 +716,10 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
719 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg; 716 struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
720 u8 alpa = 0; 717 u8 alpa = 0;
721 718
722 if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) 719 if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
723 alpa = bfa_pport_get_myalpa(bfa); 720 alpa = bfa_fcport_get_myalpa(bfa);
724 721
725 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa), 722 bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
726 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); 723 pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
727 724
728 fabric->stats.flogi_sent++; 725 fabric->stats.flogi_sent++;
@@ -814,10 +811,10 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
814 */ 811 */
815 812
816/** 813/**
817 * Module initialization 814 * Attach time initialization
818 */ 815 */
819void 816void
820bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) 817bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
821{ 818{
822 struct bfa_fcs_fabric_s *fabric; 819 struct bfa_fcs_fabric_s *fabric;
823 820
@@ -841,7 +838,13 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
841 bfa_wc_up(&fabric->wc); /* For the base port */ 838 bfa_wc_up(&fabric->wc); /* For the base port */
842 839
843 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); 840 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
844 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE); 841 bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
842}
843
844void
845bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
846{
847 bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
845 bfa_trc(fcs, 0); 848 bfa_trc(fcs, 0);
846} 849}
847 850
@@ -890,6 +893,12 @@ bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
890 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); 893 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
891} 894}
892 895
896bfa_boolean_t
897bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
898{
899 return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
900}
901
893enum bfa_pport_type 902enum bfa_pport_type
894bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) 903bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
895{ 904{
@@ -1165,8 +1174,8 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1165 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), 1174 reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
1166 bfa_os_hton3b(FC_FABRIC_PORT), 1175 bfa_os_hton3b(FC_FABRIC_PORT),
1167 n2n_port->reply_oxid, pcfg->pwwn, 1176 n2n_port->reply_oxid, pcfg->pwwn,
1168 pcfg->nwwn, bfa_pport_get_maxfrsize(bfa), 1177 pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
1169 bfa_pport_get_rx_bbcredit(bfa)); 1178 bfa_fcport_get_rx_bbcredit(bfa));
1170 1179
1171 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), 1180 bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
1172 BFA_FALSE, FC_CLASS_3, reqlen, &fchs, 1181 BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
@@ -1224,14 +1233,8 @@ bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
1224 wwn2str(pwwn_ptr, pwwn); 1233 wwn2str(pwwn_ptr, pwwn);
1225 wwn2str(fwwn_ptr, fwwn); 1234 wwn2str(fwwn_ptr, fwwn);
1226 1235
1227 switch (event) { 1236 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
1228 case BFA_PORT_AEN_FABRIC_NAME_CHANGE: 1237 pwwn_ptr, fwwn_ptr);
1229 bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr,
1230 fwwn_ptr);
1231 break;
1232 default:
1233 break;
1234 }
1235 1238
1236 aen_data.port.pwwn = pwwn; 1239 aen_data.port.pwwn = pwwn;
1237 aen_data.port.fwwn = fwwn; 1240 aen_data.port.fwwn = fwwn;
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
index 8fa7f270ef7b..981d98d542b9 100644
--- a/drivers/scsi/bfa/fcbuild.h
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -72,6 +72,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
72 case RPSC_OP_SPEED_8G: 72 case RPSC_OP_SPEED_8G:
73 return BFA_PPORT_SPEED_8GBPS; 73 return BFA_PPORT_SPEED_8GBPS;
74 74
75 case RPSC_OP_SPEED_10G:
76 return BFA_PPORT_SPEED_10GBPS;
77
75 default: 78 default:
76 return BFA_PPORT_SPEED_UNKNOWN; 79 return BFA_PPORT_SPEED_UNKNOWN;
77 } 80 }
@@ -97,6 +100,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
97 case BFA_PPORT_SPEED_8GBPS: 100 case BFA_PPORT_SPEED_8GBPS:
98 return RPSC_OP_SPEED_8G; 101 return RPSC_OP_SPEED_8G;
99 102
103 case BFA_PPORT_SPEED_10GBPS:
104 return RPSC_OP_SPEED_10G;
105
100 default: 106 default:
101 return RPSC_OP_SPEED_NOT_EST; 107 return RPSC_OP_SPEED_NOT_EST;
102 } 108 }
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 1f3c06efaa9e..8ae4a2cfa85b 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -126,7 +126,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
126 break; 126 break;
127 127
128 default: 128 default:
129 bfa_assert(0); 129 bfa_sm_fault(itnim->fcs, event);
130 } 130 }
131 131
132} 132}
@@ -161,7 +161,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
161 break; 161 break;
162 162
163 default: 163 default:
164 bfa_assert(0); 164 bfa_sm_fault(itnim->fcs, event);
165 } 165 }
166} 166}
167 167
@@ -205,7 +205,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
205 break; 205 break;
206 206
207 default: 207 default:
208 bfa_assert(0); 208 bfa_sm_fault(itnim->fcs, event);
209 } 209 }
210} 210}
211 211
@@ -240,7 +240,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
240 break; 240 break;
241 241
242 default: 242 default:
243 bfa_assert(0); 243 bfa_sm_fault(itnim->fcs, event);
244 } 244 }
245} 245}
246 246
@@ -270,7 +270,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
270 break; 270 break;
271 271
272 default: 272 default:
273 bfa_assert(0); 273 bfa_sm_fault(itnim->fcs, event);
274 } 274 }
275} 275}
276 276
@@ -298,7 +298,7 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
298 break; 298 break;
299 299
300 default: 300 default:
301 bfa_assert(0); 301 bfa_sm_fault(itnim->fcs, event);
302 } 302 }
303} 303}
304 304
@@ -321,7 +321,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
321 break; 321 break;
322 322
323 default: 323 default:
324 bfa_assert(0); 324 bfa_sm_fault(itnim->fcs, event);
325 } 325 }
326} 326}
327 327
@@ -354,7 +354,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
354 break; 354 break;
355 355
356 default: 356 default:
357 bfa_assert(0); 357 bfa_sm_fault(itnim->fcs, event);
358 } 358 }
359} 359}
360 360
@@ -385,19 +385,8 @@ bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
385 wwn2str(lpwwn_ptr, lpwwn); 385 wwn2str(lpwwn_ptr, lpwwn);
386 wwn2str(rpwwn_ptr, rpwwn); 386 wwn2str(rpwwn_ptr, rpwwn);
387 387
388 switch (event) { 388 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
389 case BFA_ITNIM_AEN_ONLINE: 389 rpwwn_ptr, lpwwn_ptr);
390 bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr);
391 break;
392 case BFA_ITNIM_AEN_OFFLINE:
393 bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr);
394 break;
395 case BFA_ITNIM_AEN_DISCONNECT:
396 bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
397 break;
398 default:
399 break;
400 }
401 390
402 aen_data.itnim.vf_id = rport->port->fabric->vf_id; 391 aen_data.itnim.vf_id = rport->port->fabric->vf_id;
403 aen_data.itnim.ppwwn = 392 aen_data.itnim.ppwwn =
@@ -689,7 +678,6 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
689 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; 678 struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
690 679
691 bfa_trc(itnim->fcs, itnim->rport->pwwn); 680 bfa_trc(itnim->fcs, itnim->rport->pwwn);
692 bfa_fcb_itnim_tov_begin(itnim->itnim_drv);
693} 681}
694 682
695/** 683/**
@@ -822,22 +810,3 @@ void
822bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim) 810bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
823{ 811{
824} 812}
825
826/**
827 * Module initialization
828 */
829void
830bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs)
831{
832}
833
834/**
835 * Module cleanup
836 */
837void
838bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs)
839{
840 bfa_fcs_modexit_comp(fcs);
841}
842
843
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
index eee960820f86..244c3f00c50c 100644
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -29,6 +29,7 @@
29/* 29/*
30* fcs friend functions: only between fcs modules 30* fcs friend functions: only between fcs modules
31 */ 31 */
32void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
32void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); 33void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
33void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); 34void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
34void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); 35void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
@@ -46,6 +47,7 @@ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
46 struct fchs_s *fchs, u16 len); 47 struct fchs_s *fchs, u16 len);
47u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); 48u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
48bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); 49bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
50bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
49enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); 51enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
50void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); 52void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
51void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric); 53void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
index 61e9e2687de3..11e6e7bce9f6 100644
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ b/drivers/scsi/bfa/fcs_fcpim.h
@@ -34,11 +34,6 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
34void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim); 34void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
35void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim); 35void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
36 36
37/*
38 * Modudle init/cleanup routines.
39 */
40void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs);
41void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs);
42void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, 37void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
43 u16 len); 38 u16 len);
44#endif /* __FCS_FCPIM_H__ */ 39#endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
index ae744ba35671..a6508c8ab184 100644
--- a/drivers/scsi/bfa/fcs_lport.h
+++ b/drivers/scsi/bfa/fcs_lport.h
@@ -84,9 +84,10 @@ void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
84 * Following routines will be called by Fabric to indicate port 84 * Following routines will be called by Fabric to indicate port
85 * online/offline to vport. 85 * online/offline to vport.
86 */ 86 */
87void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, 87void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
88 u16 vf_id, struct bfa_port_cfg_s *port_cfg, 88 uint16_t vf_id, struct bfa_fcs_vport_s *vport);
89 struct bfa_fcs_vport_s *vport); 89void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
90 struct bfa_port_cfg_s *port_cfg);
90void bfa_fcs_port_online(struct bfa_fcs_port_s *port); 91void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
91void bfa_fcs_port_offline(struct bfa_fcs_port_s *port); 92void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
92void bfa_fcs_port_delete(struct bfa_fcs_port_s *port); 93void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
index abb65191dd27..408c06a7d164 100644
--- a/drivers/scsi/bfa/fcs_port.h
+++ b/drivers/scsi/bfa/fcs_port.h
@@ -26,7 +26,6 @@
26/* 26/*
27 * fcs friend functions: only between fcs modules 27 * fcs friend functions: only between fcs modules
28 */ 28 */
29void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs); 29void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
30void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs);
31 30
32#endif /* __FCS_PPORT_H__ */ 31#endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
index f601e9d74236..9c8d1d292380 100644
--- a/drivers/scsi/bfa/fcs_rport.h
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -24,9 +24,6 @@
24 24
25#include <fcs/bfa_fcs_rport.h> 25#include <fcs/bfa_fcs_rport.h>
26 26
27void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs);
28void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs);
29
30void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, 27void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
31 u16 len); 28 u16 len);
32void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); 29void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
index 96f1bdcb31ed..f591072214fe 100644
--- a/drivers/scsi/bfa/fcs_uf.h
+++ b/drivers/scsi/bfa/fcs_uf.h
@@ -26,7 +26,6 @@
26/* 26/*
27 * fcs friend functions: only between fcs modules 27 * fcs friend functions: only between fcs modules
28 */ 28 */
29void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs); 29void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
30void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs);
31 30
32#endif /* __FCS_UF_H__ */ 31#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
index 9e80b6a97b7f..13c32ebf946c 100644
--- a/drivers/scsi/bfa/fcs_vport.h
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -22,18 +22,10 @@
22#include <fcs/bfa_fcs_vport.h> 22#include <fcs/bfa_fcs_vport.h>
23#include <defs/bfa_defs_pci.h> 23#include <defs/bfa_defs_pci.h>
24 24
25/*
26 * Modudle init/cleanup routines.
27 */
28
29void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs);
30void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs);
31
32void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); 25void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
33void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); 26void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
34void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); 27void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
35void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); 28void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
36u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs);
37 29
38#endif /* __FCS_VPORT_H__ */ 30#endif /* __FCS_VPORT_H__ */
39 31
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index df2a1e54e16b..8f17076d1a87 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -116,6 +116,9 @@ static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
116 enum port_fdmi_event event); 116 enum port_fdmi_event event);
117static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, 117static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
118 enum port_fdmi_event event); 118 enum port_fdmi_event event);
119static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
120 enum port_fdmi_event event);
121
119/** 122/**
120 * Start in offline state - awaiting MS to send start. 123 * Start in offline state - awaiting MS to send start.
121 */ 124 */
@@ -155,7 +158,7 @@ bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
155 break; 158 break;
156 159
157 default: 160 default:
158 bfa_assert(0); 161 bfa_sm_fault(port->fcs, event);
159 } 162 }
160} 163}
161 164
@@ -180,7 +183,7 @@ bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
180 break; 183 break;
181 184
182 default: 185 default:
183 bfa_assert(0); 186 bfa_sm_fault(port->fcs, event);
184 } 187 }
185} 188}
186 189
@@ -227,7 +230,7 @@ bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
227 break; 230 break;
228 231
229 default: 232 default:
230 bfa_assert(0); 233 bfa_sm_fault(port->fcs, event);
231 } 234 }
232} 235}
233 236
@@ -255,7 +258,7 @@ bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
255 break; 258 break;
256 259
257 default: 260 default:
258 bfa_assert(0); 261 bfa_sm_fault(port->fcs, event);
259 } 262 }
260} 263}
261 264
@@ -283,7 +286,7 @@ bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
283 break; 286 break;
284 287
285 default: 288 default:
286 bfa_assert(0); 289 bfa_sm_fault(port->fcs, event);
287 } 290 }
288} 291}
289 292
@@ -328,7 +331,7 @@ bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
328 break; 331 break;
329 332
330 default: 333 default:
331 bfa_assert(0); 334 bfa_sm_fault(port->fcs, event);
332 } 335 }
333} 336}
334 337
@@ -356,7 +359,7 @@ bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
356 break; 359 break;
357 360
358 default: 361 default:
359 bfa_assert(0); 362 bfa_sm_fault(port->fcs, event);
360 } 363 }
361} 364}
362 365
@@ -384,7 +387,7 @@ bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
384 break; 387 break;
385 388
386 default: 389 default:
387 bfa_assert(0); 390 bfa_sm_fault(port->fcs, event);
388 } 391 }
389} 392}
390 393
@@ -428,7 +431,7 @@ bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
428 break; 431 break;
429 432
430 default: 433 default:
431 bfa_assert(0); 434 bfa_sm_fault(port->fcs, event);
432 } 435 }
433} 436}
434 437
@@ -456,7 +459,7 @@ bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
456 break; 459 break;
457 460
458 default: 461 default:
459 bfa_assert(0); 462 bfa_sm_fault(port->fcs, event);
460 } 463 }
461} 464}
462 465
@@ -475,10 +478,24 @@ bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
475 break; 478 break;
476 479
477 default: 480 default:
478 bfa_assert(0); 481 bfa_sm_fault(port->fcs, event);
479 } 482 }
480} 483}
481 484
485/**
486 * FDMI is disabled state.
487 */
488static void
489bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
490 enum port_fdmi_event event)
491{
492 struct bfa_fcs_port_s *port = fdmi->ms->port;
493
494 bfa_trc(port->fcs, port->port_cfg.pwwn);
495 bfa_trc(port->fcs, event);
496
497 /* No op State. It can only be enabled at Driver Init. */
498}
482 499
483/** 500/**
484* RHBA : Register HBA Attributes. 501* RHBA : Register HBA Attributes.
@@ -1097,36 +1114,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
1097{ 1114{
1098 struct bfa_fcs_port_s *port = fdmi->ms->port; 1115 struct bfa_fcs_port_s *port = fdmi->ms->port;
1099 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; 1116 struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
1100 struct bfa_adapter_attr_s adapter_attr;
1101 1117
1102 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); 1118 bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
1103 bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s));
1104
1105 bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr);
1106
1107 strncpy(hba_attr->manufacturer, adapter_attr.manufacturer,
1108 sizeof(adapter_attr.manufacturer));
1109
1110 strncpy(hba_attr->serial_num, adapter_attr.serial_num,
1111 sizeof(adapter_attr.serial_num));
1112 1119
1113 strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model)); 1120 bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
1114 1121 hba_attr->manufacturer);
1115 strncpy(hba_attr->model_desc, adapter_attr.model_descr, 1122 bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
1116 sizeof(hba_attr->model_desc)); 1123 hba_attr->serial_num);
1117 1124 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
1118 strncpy(hba_attr->hw_version, adapter_attr.hw_ver, 1125 bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
1119 sizeof(hba_attr->hw_version)); 1126 bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
1127 bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
1128 hba_attr->option_rom_ver);
1129 bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);
1120 1130
1121 strncpy(hba_attr->driver_version, (char *)driver_info->version, 1131 strncpy(hba_attr->driver_version, (char *)driver_info->version,
1122 sizeof(hba_attr->driver_version)); 1132 sizeof(hba_attr->driver_version));
1123 1133
1124 strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver,
1125 sizeof(hba_attr->option_rom_ver));
1126
1127 strncpy(hba_attr->fw_version, adapter_attr.fw_ver,
1128 sizeof(hba_attr->fw_version));
1129
1130 strncpy(hba_attr->os_name, driver_info->host_os_name, 1134 strncpy(hba_attr->os_name, driver_info->host_os_name,
1131 sizeof(hba_attr->os_name)); 1135 sizeof(hba_attr->os_name));
1132 1136
@@ -1158,7 +1162,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
1158 /* 1162 /*
1159 * get pport attributes from hal 1163 * get pport attributes from hal
1160 */ 1164 */
1161 bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 1165 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
1162 1166
1163 /* 1167 /*
1164 * get FC4 type Bitmask 1168 * get FC4 type Bitmask
@@ -1201,7 +1205,10 @@ bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
1201 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; 1205 struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
1202 1206
1203 fdmi->ms = ms; 1207 fdmi->ms = ms;
1204 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); 1208 if (ms->port->fcs->fdmi_enabled)
1209 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
1210 else
1211 bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
1205} 1212}
1206 1213
1207void 1214void
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
index d9cbc2a783d4..6abbab005db6 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -18,21 +18,24 @@
18#define __BFA_AEN_H__ 18#define __BFA_AEN_H__
19 19
20#include "defs/bfa_defs_aen.h" 20#include "defs/bfa_defs_aen.h"
21#include "defs/bfa_defs_status.h"
22#include "cs/bfa_debug.h"
21 23
22#define BFA_AEN_MAX_ENTRY 512 24#define BFA_AEN_MAX_ENTRY 512
23 25
24extern s32 bfa_aen_max_cfg_entry; 26extern int bfa_aen_max_cfg_entry;
25struct bfa_aen_s { 27struct bfa_aen_s {
26 void *bfad; 28 void *bfad;
27 s32 max_entry; 29 int max_entry;
28 s32 write_index; 30 int write_index;
29 s32 read_index; 31 int read_index;
30 u32 bfad_num; 32 int bfad_num;
31 u32 seq_num; 33 int seq_num;
32 void (*aen_cb_notify)(void *bfad); 34 void (*aen_cb_notify)(void *bfad);
33 void (*gettimeofday)(struct bfa_timeval_s *tv); 35 void (*gettimeofday)(struct bfa_timeval_s *tv);
34 struct bfa_trc_mod_s *trcmod; 36 struct bfa_trc_mod_s *trcmod;
35 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ 37 int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
38 struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
36}; 39};
37 40
38 41
@@ -45,48 +48,49 @@ bfa_aen_set_max_cfg_entry(int max_entry)
45 bfa_aen_max_cfg_entry = max_entry; 48 bfa_aen_max_cfg_entry = max_entry;
46} 49}
47 50
48static inline s32 51static inline int
49bfa_aen_get_max_cfg_entry(void) 52bfa_aen_get_max_cfg_entry(void)
50{ 53{
51 return bfa_aen_max_cfg_entry; 54 return bfa_aen_max_cfg_entry;
52} 55}
53 56
54static inline s32 57static inline int
55bfa_aen_get_meminfo(void) 58bfa_aen_get_meminfo(void)
56{ 59{
57 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); 60 return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
58} 61}
59 62
60static inline s32 63static inline int
61bfa_aen_get_wi(struct bfa_aen_s *aen) 64bfa_aen_get_wi(struct bfa_aen_s *aen)
62{ 65{
63 return aen->write_index; 66 return aen->write_index;
64} 67}
65 68
66static inline s32 69static inline int
67bfa_aen_get_ri(struct bfa_aen_s *aen) 70bfa_aen_get_ri(struct bfa_aen_s *aen)
68{ 71{
69 return aen->read_index; 72 return aen->read_index;
70} 73}
71 74
72static inline s32 75static inline int
73bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index) 76bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id)
74{ 77{
75 return ((aen->write_index + aen->max_entry) - read_index) 78 bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
79 return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
76 % aen->max_entry; 80 % aen->max_entry;
77} 81}
78 82
79s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, 83int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
80 void *bfad, u32 inst_id, void (*aen_cb_notify)(void *), 84 void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
81 void (*gettimeofday)(struct bfa_timeval_s *)); 85 void (*gettimeofday)(struct bfa_timeval_s *));
82 86
83s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, 87void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
84 int aen_type, union bfa_aen_data_u *aen_data); 88 int aen_type, union bfa_aen_data_u *aen_data);
85 89
86s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry, 90bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
87 s32 entry_space, s32 rii, s32 *ri_arr, 91 struct bfa_aen_entry_s *aen_entry,
88 s32 ri_arr_cnt); 92 int entry_req, enum bfa_aen_app app_id, int *entry_ret);
89 93
90s32 bfa_aen_get_inst(struct bfa_aen_s *aen); 94int bfa_aen_get_inst(struct bfa_aen_s *aen);
91 95
92#endif /* __BFA_AEN_H__ */ 96#endif /* __BFA_AEN_H__ */
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index d4bc0d9fa42c..1f5966cfbd16 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -106,6 +106,26 @@ struct bfa_sge_s {
106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) 106 bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
107#define bfa_ioc_clear_stats(__bfa) \ 107#define bfa_ioc_clear_stats(__bfa) \
108 bfa_ioc_clr_stats(&(__bfa)->ioc) 108 bfa_ioc_clr_stats(&(__bfa)->ioc)
109#define bfa_get_nports(__bfa) \
110 bfa_ioc_get_nports(&(__bfa)->ioc)
111#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
112 bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
113#define bfa_get_adapter_model(__bfa, __model) \
114 bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
115#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
116 bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
117#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
118 bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
119#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
120 bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
121#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
122 bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
123#define bfa_get_ioc_state(__bfa) \
124 bfa_ioc_get_state(&(__bfa)->ioc)
125#define bfa_get_type(__bfa) \
126 bfa_ioc_get_type(&(__bfa)->ioc)
127#define bfa_get_mac(__bfa) \
128 bfa_ioc_get_mac(&(__bfa)->ioc)
109 129
110/* 130/*
111 * bfa API functions 131 * bfa API functions
@@ -161,6 +181,7 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
161void bfa_iocfc_enable(struct bfa_s *bfa); 181void bfa_iocfc_enable(struct bfa_s *bfa);
162void bfa_iocfc_disable(struct bfa_s *bfa); 182void bfa_iocfc_disable(struct bfa_s *bfa);
163void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); 183void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
184void bfa_chip_reset(struct bfa_s *bfa);
164void bfa_cb_ioc_disable(void *bfad); 185void bfa_cb_ioc_disable(void *bfad);
165void bfa_timer_tick(struct bfa_s *bfa); 186void bfa_timer_tick(struct bfa_s *bfa);
166#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 187#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
@@ -171,6 +192,7 @@ void bfa_timer_tick(struct bfa_s *bfa);
171 */ 192 */
172bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); 193bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
173bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); 194bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
195void bfa_debug_fwsave_clear(struct bfa_s *bfa);
174 196
175#include "bfa_priv.h" 197#include "bfa_priv.h"
176 198
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 268d956bad89..1349b99a3c6d 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -26,6 +26,7 @@ struct bfa_fcxp_s;
26#include <defs/bfa_defs_pport.h> 26#include <defs/bfa_defs_pport.h>
27#include <defs/bfa_defs_rport.h> 27#include <defs/bfa_defs_rport.h>
28#include <defs/bfa_defs_qos.h> 28#include <defs/bfa_defs_qos.h>
29#include <defs/bfa_defs_fcport.h>
29#include <cs/bfa_sm.h> 30#include <cs/bfa_sm.h>
30#include <bfa.h> 31#include <bfa.h>
31 32
@@ -35,7 +36,7 @@ struct bfa_fcxp_s;
35struct bfa_rport_info_s { 36struct bfa_rport_info_s {
36 u16 max_frmsz; /* max rcv pdu size */ 37 u16 max_frmsz; /* max rcv pdu size */
37 u32 pid:24, /* remote port ID */ 38 u32 pid:24, /* remote port ID */
38 lp_tag:8; 39 lp_tag:8; /* tag */
39 u32 local_pid:24, /* local port ID */ 40 u32 local_pid:24, /* local port ID */
40 cisc:8; /* CIRO supported */ 41 cisc:8; /* CIRO supported */
41 u8 fc_class; /* supported FC classes. enum fc_cos */ 42 u8 fc_class; /* supported FC classes. enum fc_cos */
@@ -54,7 +55,7 @@ struct bfa_rport_s {
54 void *rport_drv; /* fcs/driver rport object */ 55 void *rport_drv; /* fcs/driver rport object */
55 u16 fw_handle; /* firmware rport handle */ 56 u16 fw_handle; /* firmware rport handle */
56 u16 rport_tag; /* BFA rport tag */ 57 u16 rport_tag; /* BFA rport tag */
57 struct bfa_rport_info_s rport_info; /* rport info from *fcs/driver */ 58 struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
58 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ 59 struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
59 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ 60 struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
60 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ 61 struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
@@ -101,7 +102,7 @@ struct bfa_uf_buf_s {
101struct bfa_uf_s { 102struct bfa_uf_s {
102 struct list_head qe; /* queue element */ 103 struct list_head qe; /* queue element */
103 struct bfa_s *bfa; /* bfa instance */ 104 struct bfa_s *bfa; /* bfa instance */
104 u16 uf_tag; /* identifying tag f/w messages */ 105 u16 uf_tag; /* identifying tag fw msgs */
105 u16 vf_id; 106 u16 vf_id;
106 u16 src_rport_handle; 107 u16 src_rport_handle;
107 u16 rsvd; 108 u16 rsvd;
@@ -127,7 +128,7 @@ struct bfa_lps_s {
127 u8 reqq; /* lport request queue */ 128 u8 reqq; /* lport request queue */
128 u8 alpa; /* ALPA for loop topologies */ 129 u8 alpa; /* ALPA for loop topologies */
129 u32 lp_pid; /* lport port ID */ 130 u32 lp_pid; /* lport port ID */
130 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/ 131 bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */
131 bfa_boolean_t auth_en; /* enable authentication */ 132 bfa_boolean_t auth_en; /* enable authentication */
132 bfa_boolean_t auth_req; /* authentication required */ 133 bfa_boolean_t auth_req; /* authentication required */
133 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ 134 bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
@@ -151,60 +152,69 @@ struct bfa_lps_s {
151 bfa_eproto_status_t ext_status; 152 bfa_eproto_status_t ext_status;
152}; 153};
153 154
155#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
156
154/* 157/*
155 * bfa pport API functions 158 * bfa pport API functions
156 */ 159 */
157bfa_status_t bfa_pport_enable(struct bfa_s *bfa); 160bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
158bfa_status_t bfa_pport_disable(struct bfa_s *bfa); 161bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
159bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa, 162bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
160 enum bfa_pport_speed speed); 163 enum bfa_pport_speed speed);
161enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa); 164enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
162bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa, 165bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
163 enum bfa_pport_topology topo); 166 enum bfa_pport_topology topo);
164enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa); 167enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
165bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); 168bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
166bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); 169bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
167u8 bfa_pport_get_myalpa(struct bfa_s *bfa); 170u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
168bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa); 171bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
169bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); 172bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
170u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa); 173u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
171u32 bfa_pport_mypid(struct bfa_s *bfa); 174u32 bfa_fcport_mypid(struct bfa_s *bfa);
172u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa); 175u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
173bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap); 176bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
174bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa); 177bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
175bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap); 178bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
176void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); 179void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
177wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); 180wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
178bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa, 181void bfa_fcport_event_register(struct bfa_s *bfa,
179 union bfa_pport_stats_u *stats,
180 bfa_cb_pport_t cbfn, void *cbarg);
181bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
182 void *cbarg);
183void bfa_pport_event_register(struct bfa_s *bfa,
184 void (*event_cbfn) (void *cbarg, 182 void (*event_cbfn) (void *cbarg,
185 bfa_pport_event_t event), void *event_cbarg); 183 bfa_pport_event_t event), void *event_cbarg);
186bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa); 184bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
187void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); 185void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
188void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); 186void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
189bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, 187bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
190 enum bfa_pport_speed speed); 188 enum bfa_pport_speed speed);
191enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa); 189enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
192 190
193void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); 191void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
194void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status); 192void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
195void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, 193void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
196 bfa_boolean_t link_e2e_beacon); 194 bfa_boolean_t link_e2e_beacon);
197void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); 195void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
198void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr); 196void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
199void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, 197 struct bfa_qos_attr_s *qos_attr);
198void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
200 struct bfa_qos_vc_attr_s *qos_vc_attr); 199 struct bfa_qos_vc_attr_s *qos_vc_attr);
201bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa, 200bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
202 union bfa_pport_stats_u *stats, 201 union bfa_fcport_stats_u *stats,
203 bfa_cb_pport_t cbfn, void *cbarg); 202 bfa_cb_pport_t cbfn, void *cbarg);
204bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, 203bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
205 void *cbarg); 204 void *cbarg);
206bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa); 205bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
207bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa); 206 union bfa_fcport_stats_u *stats,
207 bfa_cb_pport_t cbfn, void *cbarg);
208bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
209 void *cbarg);
210
211bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
212bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
213bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
214 union bfa_fcport_stats_u *stats,
215 bfa_cb_pport_t cbfn, void *cbarg);
216bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
217 void *cbarg);
208 218
209/* 219/*
210 * bfa rport API functions 220 * bfa rport API functions
@@ -293,6 +303,7 @@ void bfa_uf_free(struct bfa_uf_s *uf);
293 * bfa lport service api 303 * bfa lport service api
294 */ 304 */
295 305
306u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
296struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); 307struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
297void bfa_lps_delete(struct bfa_lps_s *lps); 308void bfa_lps_delete(struct bfa_lps_s *lps);
298void bfa_lps_discard(struct bfa_lps_s *lps); 309void bfa_lps_discard(struct bfa_lps_s *lps);
@@ -315,10 +326,12 @@ wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
315wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); 326wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
316u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); 327u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
317u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); 328u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
329mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
318void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); 330void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
319void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); 331void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
320void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); 332void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
321void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); 333void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
334void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
322 335
323#endif /* __BFA_SVC_H__ */ 336#endif /* __BFA_SVC_H__ */
324 337
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
index e407103fa565..f71087448222 100644
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ b/drivers/scsi/bfa/include/bfa_timer.h
@@ -41,7 +41,7 @@ struct bfa_timer_mod_s {
41 struct list_head timer_q; 41 struct list_head timer_q;
42}; 42};
43 43
44#define BFA_TIMER_FREQ 500 /**< specified in millisecs */ 44#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
45 45
46void bfa_timer_beat(struct bfa_timer_mod_s *mod); 46void bfa_timer_beat(struct bfa_timer_mod_s *mod);
47void bfa_timer_init(struct bfa_timer_mod_s *mod); 47void bfa_timer_init(struct bfa_timer_mod_s *mod);
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
index 7042c18e542d..a550e80cabd2 100644
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -143,8 +143,8 @@ enum bfi_mclass {
143 BFI_MC_IOC = 1, /* IO Controller (IOC) */ 143 BFI_MC_IOC = 1, /* IO Controller (IOC) */
144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */ 144 BFI_MC_DIAG = 2, /* Diagnostic Msgs */
145 BFI_MC_FLASH = 3, /* Flash message class */ 145 BFI_MC_FLASH = 3, /* Flash message class */
146 BFI_MC_CEE = 4, 146 BFI_MC_CEE = 4, /* CEE */
147 BFI_MC_FC_PORT = 5, /* FC port */ 147 BFI_MC_FCPORT = 5, /* FC port */
148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ 148 BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
149 BFI_MC_LL = 7, /* Link Layer */ 149 BFI_MC_LL = 7, /* Link Layer */
150 BFI_MC_UF = 8, /* Unsolicited frame receive */ 150 BFI_MC_UF = 8, /* Unsolicited frame receive */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
index b3bb52b565b1..a51ee61ddb19 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
@@ -177,7 +177,21 @@
177#define __PSS_LMEM_INIT_EN 0x00000100 177#define __PSS_LMEM_INIT_EN 0x00000100
178#define __PSS_LPU1_RESET 0x00000002 178#define __PSS_LPU1_RESET 0x00000002
179#define __PSS_LPU0_RESET 0x00000001 179#define __PSS_LPU0_RESET 0x00000001
180 180#define PSS_ERR_STATUS_REG 0x00018810
181#define __PSS_LMEM1_CORR_ERR 0x00000800
182#define __PSS_LMEM0_CORR_ERR 0x00000400
183#define __PSS_LMEM1_UNCORR_ERR 0x00000200
184#define __PSS_LMEM0_UNCORR_ERR 0x00000100
185#define __PSS_BAL_PERR 0x00000080
186#define __PSS_DIP_IF_ERR 0x00000040
187#define __PSS_IOH_IF_ERR 0x00000020
188#define __PSS_TDS_IF_ERR 0x00000010
189#define __PSS_RDS_IF_ERR 0x00000008
190#define __PSS_SGM_IF_ERR 0x00000004
191#define __PSS_LPU1_RAM_ERR 0x00000002
192#define __PSS_LPU0_RAM_ERR 0x00000001
193#define ERR_SET_REG 0x00018818
194#define __PSS_ERR_STATUS_SET 0x00000fff
181 195
182/* 196/*
183 * These definitions are either in error/missing in spec. Its auto-generated 197 * These definitions are either in error/missing in spec. Its auto-generated
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
index d3caa58c0a0a..57a8497105af 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -430,6 +430,31 @@ enum {
430#define __PSS_LMEM_INIT_EN 0x00000100 430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002 431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001 432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
433#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 458#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
434#define __RXQ0_ADD_VECTORS_P 0x80000000 459#define __RXQ0_ADD_VECTORS_P 0x80000000
435#define __RXQ0_STOP_P 0x40000000 460#define __RXQ0_STOP_P 0x40000000
@@ -589,6 +614,7 @@ enum {
589#define __HFN_INT_MBOX_LPU1 0x00200000U 614#define __HFN_INT_MBOX_LPU1 0x00200000U
590#define __HFN_INT_MBOX1_LPU0 0x00400000U 615#define __HFN_INT_MBOX1_LPU0 0x00400000U
591#define __HFN_INT_MBOX1_LPU1 0x00800000U 616#define __HFN_INT_MBOX1_LPU1 0x00800000U
617#define __HFN_INT_LL_HALT 0x01000000U
592#define __HFN_INT_CPE_MASK 0x000000ffU 618#define __HFN_INT_CPE_MASK 0x000000ffU
593#define __HFN_INT_RME_MASK 0x0000ff00U 619#define __HFN_INT_RME_MASK 0x0000ff00U
594 620
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index 96ef05670659..a0158aac0024 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -123,7 +123,7 @@ enum bfi_ioc_state {
123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */ 123 BFI_IOC_DISABLING = 5, /* IOC is being disabled */
124 BFI_IOC_DISABLED = 6, /* IOC is disabled */ 124 BFI_IOC_DISABLED = 6, /* IOC is disabled */
125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ 125 BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
126 BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */ 126 BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ 127 BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
128}; 128};
129 129
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
index c59d47badb4b..7ed31bbb8696 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -30,6 +30,7 @@ enum bfi_lps_h2i_msgs {
30enum bfi_lps_i2h_msgs { 30enum bfi_lps_i2h_msgs {
31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), 31 BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), 32 BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
33 BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
33}; 34};
34 35
35struct bfi_lps_login_req_s { 36struct bfi_lps_login_req_s {
@@ -77,6 +78,12 @@ struct bfi_lps_logout_rsp_s {
77 u8 rsvd[2]; 78 u8 rsvd[2];
78}; 79};
79 80
81struct bfi_lps_cvl_event_s {
82 struct bfi_mhdr_s mh; /* common msg header */
83 u8 lp_tag;
84 u8 rsvd[3];
85};
86
80union bfi_lps_h2i_msg_u { 87union bfi_lps_h2i_msg_u {
81 struct bfi_mhdr_s *msg; 88 struct bfi_mhdr_s *msg;
82 struct bfi_lps_login_req_s *login_req; 89 struct bfi_lps_login_req_s *login_req;
@@ -87,6 +94,7 @@ union bfi_lps_i2h_msg_u {
87 struct bfi_msg_s *msg; 94 struct bfi_msg_s *msg;
88 struct bfi_lps_login_rsp_s *login_rsp; 95 struct bfi_lps_login_rsp_s *login_rsp;
89 struct bfi_lps_logout_rsp_s *logout_rsp; 96 struct bfi_lps_logout_rsp_s *logout_rsp;
97 struct bfi_lps_cvl_event_s *cvl_event;
90}; 98};
91 99
92#pragma pack() 100#pragma pack()
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
index c96d246851af..50dcf45c7470 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h
@@ -22,163 +22,97 @@
22 22
23#pragma pack(1) 23#pragma pack(1)
24 24
25enum bfi_pport_h2i { 25enum bfi_fcport_h2i {
26 BFI_PPORT_H2I_ENABLE_REQ = (1), 26 BFI_FCPORT_H2I_ENABLE_REQ = (1),
27 BFI_PPORT_H2I_DISABLE_REQ = (2), 27 BFI_FCPORT_H2I_DISABLE_REQ = (2),
28 BFI_PPORT_H2I_GET_STATS_REQ = (3), 28 BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
29 BFI_PPORT_H2I_CLEAR_STATS_REQ = (4), 29 BFI_FCPORT_H2I_STATS_GET_REQ = (4),
30 BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5), 30 BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
31 BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6),
32 BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7),
33 BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8),
34 BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9),
35}; 31};
36 32
37enum bfi_pport_i2h { 33enum bfi_fcport_i2h {
38 BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1), 34 BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
39 BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2), 35 BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
40 BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3), 36 BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
41 BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), 37 BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
42 BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5), 38 BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
43 BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6), 39 BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
44 BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7),
45 BFI_PPORT_I2H_EVENT = BFA_I2HM(8),
46 BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9),
47 BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10),
48}; 40};
49 41
50/** 42/**
51 * Generic REQ type 43 * Generic REQ type
52 */ 44 */
53struct bfi_pport_generic_req_s { 45struct bfi_fcport_req_s {
54 struct bfi_mhdr_s mh; /* msg header */ 46 struct bfi_mhdr_s mh; /* msg header */
55 u32 msgtag; /* msgtag for reply */ 47 u32 msgtag; /* msgtag for reply */
56}; 48};
57 49
58/** 50/**
59 * Generic RSP type 51 * Generic RSP type
60 */ 52 */
61struct bfi_pport_generic_rsp_s { 53struct bfi_fcport_rsp_s {
62 struct bfi_mhdr_s mh; /* common msg header */ 54 struct bfi_mhdr_s mh; /* common msg header */
63 u8 status; /* port enable status */ 55 u8 status; /* port enable status */
64 u8 rsvd[3]; 56 u8 rsvd[3];
65 u32 msgtag; /* msgtag for reply */ 57 u32 msgtag; /* msgtag for reply */
66}; 58};
67 59
68/** 60/**
69 * BFI_PPORT_H2I_ENABLE_REQ 61 * BFI_FCPORT_H2I_ENABLE_REQ
70 */ 62 */
71struct bfi_pport_enable_req_s { 63struct bfi_fcport_enable_req_s {
72 struct bfi_mhdr_s mh; /* msg header */ 64 struct bfi_mhdr_s mh; /* msg header */
73 u32 rsvd1; 65 u32 rsvd1;
74 wwn_t nwwn; /* node wwn of physical port */ 66 wwn_t nwwn; /* node wwn of physical port */
75 wwn_t pwwn; /* port wwn of physical port */ 67 wwn_t pwwn; /* port wwn of physical port */
76 struct bfa_pport_cfg_s port_cfg; /* port configuration */ 68 struct bfa_pport_cfg_s port_cfg; /* port configuration */
77 union bfi_addr_u stats_dma_addr; /* DMA address for stats */ 69 union bfi_addr_u stats_dma_addr; /* DMA address for stats */
78 u32 msgtag; /* msgtag for reply */ 70 u32 msgtag; /* msgtag for reply */
79 u32 rsvd2; 71 u32 rsvd2;
80}; 72};
81 73
82/** 74/**
83 * BFI_PPORT_I2H_ENABLE_RSP 75 * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
84 */ 76 */
85#define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s 77struct bfi_fcport_set_svc_params_req_s {
86
87/**
88 * BFI_PPORT_H2I_DISABLE_REQ
89 */
90#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s
91
92/**
93 * BFI_PPORT_I2H_DISABLE_RSP
94 */
95#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s
96
97/**
98 * BFI_PPORT_H2I_GET_STATS_REQ
99 */
100#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s
101
102/**
103 * BFI_PPORT_I2H_GET_STATS_RSP
104 */
105#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s
106
107/**
108 * BFI_PPORT_H2I_CLEAR_STATS_REQ
109 */
110#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s
111
112/**
113 * BFI_PPORT_I2H_CLEAR_STATS_RSP
114 */
115#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s
116
117/**
118 * BFI_PPORT_H2I_GET_QOS_STATS_REQ
119 */
120#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s
121
122/**
123 * BFI_PPORT_H2I_GET_QOS_STATS_RSP
124 */
125#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
126
127/**
128 * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ
129 */
130#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s
131
132/**
133 * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP
134 */
135#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
136
137/**
138 * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ
139 */
140struct bfi_pport_set_svc_params_req_s {
141 struct bfi_mhdr_s mh; /* msg header */ 78 struct bfi_mhdr_s mh; /* msg header */
142 u16 tx_bbcredit; /* Tx credits */ 79 u16 tx_bbcredit; /* Tx credits */
143 u16 rsvd; 80 u16 rsvd;
144}; 81};
145 82
146/** 83/**
147 * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP 84 * BFI_FCPORT_I2H_EVENT
148 */
149
150/**
151 * BFI_PPORT_I2H_EVENT
152 */ 85 */
153struct bfi_pport_event_s { 86struct bfi_fcport_event_s {
154 struct bfi_mhdr_s mh; /* common msg header */ 87 struct bfi_mhdr_s mh; /* common msg header */
155 struct bfa_pport_link_s link_state; 88 struct bfa_pport_link_s link_state;
156}; 89};
157 90
158union bfi_pport_h2i_msg_u { 91/**
92 * fcport H2I message
93 */
94union bfi_fcport_h2i_msg_u {
159 struct bfi_mhdr_s *mhdr; 95 struct bfi_mhdr_s *mhdr;
160 struct bfi_pport_enable_req_s *penable; 96 struct bfi_fcport_enable_req_s *penable;
161 struct bfi_pport_generic_req_s *pdisable; 97 struct bfi_fcport_req_s *pdisable;
162 struct bfi_pport_generic_req_s *pgetstats; 98 struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
163 struct bfi_pport_generic_req_s *pclearstats; 99 struct bfi_fcport_req_s *pstatsget;
164 struct bfi_pport_set_svc_params_req_s *psetsvcparams; 100 struct bfi_fcport_req_s *pstatsclear;
165 struct bfi_pport_get_qos_stats_req_s *pgetqosstats;
166 struct bfi_pport_generic_req_s *pclearqosstats;
167}; 101};
168 102
169union bfi_pport_i2h_msg_u { 103/**
104 * fcport I2H message
105 */
106union bfi_fcport_i2h_msg_u {
170 struct bfi_msg_s *msg; 107 struct bfi_msg_s *msg;
171 struct bfi_pport_generic_rsp_s *enable_rsp; 108 struct bfi_fcport_rsp_s *penable_rsp;
172 struct bfi_pport_disable_rsp_s *disable_rsp; 109 struct bfi_fcport_rsp_s *pdisable_rsp;
173 struct bfi_pport_generic_rsp_s *getstats_rsp; 110 struct bfi_fcport_rsp_s *psetsvcparams_rsp;
174 struct bfi_pport_clear_stats_rsp_s *clearstats_rsp; 111 struct bfi_fcport_rsp_s *pstatsget_rsp;
175 struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp; 112 struct bfi_fcport_rsp_s *pstatsclear_rsp;
176 struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp; 113 struct bfi_fcport_event_s *event;
177 struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp;
178 struct bfi_pport_event_s *event;
179}; 114};
180 115
181#pragma pack() 116#pragma pack()
182 117
183#endif /* __BFI_PPORT_H__ */ 118#endif /* __BFI_PPORT_H__ */
184
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
index 43ba7064e81a..a75a1f3be315 100644
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
@@ -31,6 +31,10 @@
31enum { 31enum {
32 BFA_TRC_CNA_CEE = 1, 32 BFA_TRC_CNA_CEE = 1,
33 BFA_TRC_CNA_PORT = 2, 33 BFA_TRC_CNA_PORT = 2,
34 BFA_TRC_CNA_IOC = 3,
35 BFA_TRC_CNA_DIAG = 4,
36 BFA_TRC_CNA_IOC_CB = 5,
37 BFA_TRC_CNA_IOC_CT = 6,
34}; 38};
35 39
36#endif /* __BFA_CNA_TRCMOD_H__ */ 40#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
index 761cbe22130a..bc334e0a93fa 100644
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ b/drivers/scsi/bfa/include/cs/bfa_log.h
@@ -157,7 +157,7 @@ typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
157 157
158 158
159struct bfa_log_mod_s { 159struct bfa_log_mod_s {
160 char instance_info[16]; /* instance info */ 160 char instance_info[BFA_STRING_32]; /* instance info */
161 int log_level[BFA_LOG_MODULE_ID_MAX + 1]; 161 int log_level[BFA_LOG_MODULE_ID_MAX + 1];
162 /* log level for modules */ 162 /* log level for modules */
163 bfa_log_cb_t cbfn; /* callback function */ 163 bfa_log_cb_t cbfn; /* callback function */
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h
index 670f86e5fc6e..f5bef63b5877 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/include/cs/bfa_plog.h
@@ -80,7 +80,8 @@ enum bfa_plog_mid {
80 BFA_PL_MID_HAL_FCXP = 4, 80 BFA_PL_MID_HAL_FCXP = 4,
81 BFA_PL_MID_HAL_UF = 5, 81 BFA_PL_MID_HAL_UF = 5,
82 BFA_PL_MID_FCS = 6, 82 BFA_PL_MID_FCS = 6,
83 BFA_PL_MID_MAX = 7 83 BFA_PL_MID_LPS = 7,
84 BFA_PL_MID_MAX = 8
84}; 85};
85 86
86#define BFA_PL_MID_STRLEN 8 87#define BFA_PL_MID_STRLEN 8
@@ -118,7 +119,11 @@ enum bfa_plog_eid {
118 BFA_PL_EID_RSCN = 17, 119 BFA_PL_EID_RSCN = 17,
119 BFA_PL_EID_DEBUG = 18, 120 BFA_PL_EID_DEBUG = 18,
120 BFA_PL_EID_MISC = 19, 121 BFA_PL_EID_MISC = 19,
121 BFA_PL_EID_MAX = 20 122 BFA_PL_EID_FIP_FCF_DISC = 20,
123 BFA_PL_EID_FIP_FCF_CVL = 21,
124 BFA_PL_EID_LOGIN = 22,
125 BFA_PL_EID_LOGO = 23,
126 BFA_PL_EID_MAX = 24
122}; 127};
123 128
124#define BFA_PL_ENAME_STRLEN 8 129#define BFA_PL_ENAME_STRLEN 8
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
index b0a92baf6657..11fba9082f05 100644
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -23,6 +23,14 @@
23#define __BFA_SM_H__ 23#define __BFA_SM_H__
24 24
25typedef void (*bfa_sm_t)(void *sm, int event); 25typedef void (*bfa_sm_t)(void *sm, int event);
26/**
27 * oc - object class eg. bfa_ioc
28 * st - state, eg. reset
29 * otype - object type, eg. struct bfa_ioc_s
30 * etype - object type, eg. enum ioc_event
31 */
32#define bfa_sm_state_decl(oc, st, otype, etype) \
33 static void oc ## _sm_ ## st(otype * fsm, etype event)
26 34
27#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) 35#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
28#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) 36#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
index 4c81a613db3d..35244698fcdc 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
@@ -30,6 +30,16 @@
30#include <defs/bfa_defs_audit.h> 30#include <defs/bfa_defs_audit.h>
31#include <defs/bfa_defs_ethport.h> 31#include <defs/bfa_defs_ethport.h>
32 32
33#define BFA_AEN_MAX_APP 5
34
35enum bfa_aen_app {
36 bfa_aen_app_bcu = 0, /* No thread for bcu */
37 bfa_aen_app_hcm = 1,
38 bfa_aen_app_cim = 2,
39 bfa_aen_app_snia = 3,
40 bfa_aen_app_test = 4, /* To be removed after unit test */
41};
42
33enum bfa_aen_category { 43enum bfa_aen_category {
34 BFA_AEN_CAT_ADAPTER = 1, 44 BFA_AEN_CAT_ADAPTER = 1,
35 BFA_AEN_CAT_PORT = 2, 45 BFA_AEN_CAT_PORT = 2,
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
index dd19c83aba58..45df32820911 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -23,6 +23,7 @@
23#define PRIVATE_KEY 19009 23#define PRIVATE_KEY 19009
24#define KEY_LEN 32399 24#define KEY_LEN 32399
25#define BFA_AUTH_SECRET_STRING_LEN 256 25#define BFA_AUTH_SECRET_STRING_LEN 256
26#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE
26#define BFA_AUTH_FAIL_TIMEOUT 0xFF 27#define BFA_AUTH_FAIL_TIMEOUT 0xFF
27 28
28/** 29/**
@@ -41,6 +42,27 @@ enum bfa_auth_status {
41 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */ 42 BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
42}; 43};
43 44
45enum bfa_auth_rej_code {
46 BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */
47 BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */
48};
49
50/**
51 * Authentication reject codes
52 */
53enum bfa_auth_rej_code_exp {
54 BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */
55 BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */
56 BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */
57 BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */
58 BFA_AUTH_AUTH_FAILED = 5, /* auth failed */
59 BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */
60 BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */
61 BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */
62 BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */
63 BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */
64};
65
44struct auth_proto_stats_s { 66struct auth_proto_stats_s {
45 u32 auth_rjts; 67 u32 auth_rjts;
46 u32 auth_negs; 68 u32 auth_negs;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index 520a22f52dd1..b0ac9ac15c5d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -28,10 +28,6 @@
28 28
29#define BFA_CEE_LLDP_MAX_STRING_LEN (128) 29#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
30 30
31
32/* FIXME: this is coming from the protocol spec. Can the host & apps share the
33 protocol .h files ?
34 */
35#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 31#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
36#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 32#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
37#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 33#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
@@ -94,9 +90,10 @@ struct bfa_cee_dcbx_cfg_s {
94/* CEE status */ 90/* CEE status */
95/* Making this to tri-state for the benefit of port list command */ 91/* Making this to tri-state for the benefit of port list command */
96enum bfa_cee_status_e { 92enum bfa_cee_status_e {
97 CEE_PHY_DOWN = 0, 93 CEE_UP = 0,
98 CEE_PHY_UP = 1, 94 CEE_PHY_UP = 1,
99 CEE_UP = 2, 95 CEE_LOOPBACK = 2,
96 CEE_PHY_DOWN = 3,
100}; 97};
101 98
102/* CEE Query */ 99/* CEE Query */
@@ -107,7 +104,8 @@ struct bfa_cee_attr_s {
107 struct bfa_cee_dcbx_cfg_s dcbx_remote; 104 struct bfa_cee_dcbx_cfg_s dcbx_remote;
108 mac_t src_mac; 105 mac_t src_mac;
109 u8 link_speed; 106 u8 link_speed;
110 u8 filler[3]; 107 u8 nw_priority;
108 u8 filler[2];
111}; 109};
112 110
113 111
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
index 57049805762b..50382dd2ab41 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -21,6 +21,7 @@
21/** 21/**
22 * Driver statistics 22 * Driver statistics
23 */ 23 */
24struct bfa_driver_stats_s {
24 u16 tm_io_abort; 25 u16 tm_io_abort;
25 u16 tm_io_abort_comp; 26 u16 tm_io_abort_comp;
26 u16 tm_lun_reset; 27 u16 tm_lun_reset;
@@ -34,7 +35,7 @@
34 u64 output_req; 35 u64 output_req;
35 u64 input_words; 36 u64 input_words;
36 u64 output_words; 37 u64 output_words;
37} bfa_driver_stats_t; 38};
38 39
39 40
40#endif /* __BFA_DEFS_DRIVER_H__ */ 41#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
index 79f9b3e146f7..b4fa0923aa89 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
@@ -19,6 +19,7 @@
19#define __BFA_DEFS_ETHPORT_H__ 19#define __BFA_DEFS_ETHPORT_H__
20 20
21#include <defs/bfa_defs_status.h> 21#include <defs/bfa_defs_status.h>
22#include <defs/bfa_defs_port.h>
22#include <protocol/types.h> 23#include <protocol/types.h>
23#include <cna/pstats/phyport_defs.h> 24#include <cna/pstats/phyport_defs.h>
24#include <cna/pstats/ethport_defs.h> 25#include <cna/pstats/ethport_defs.h>
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
new file mode 100644
index 000000000000..a07ef4a3cd78
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * bfa_defs_fcport.h
7 *
8 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License (GPL) Version 2 as
12 * published by the Free Software Foundation
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19#ifndef __BFA_DEFS_FCPORT_H__
20#define __BFA_DEFS_FCPORT_H__
21
22#include <defs/bfa_defs_types.h>
23#include <protocol/types.h>
24
25#pragma pack(1)
26
27/**
28 * FCoE statistics
29 */
30struct bfa_fcoe_stats_s {
31 u64 secs_reset; /* Seconds since stats reset */
32 u64 cee_linkups; /* CEE link up */
33 u64 cee_linkdns; /* CEE link down */
34 u64 fip_linkups; /* FIP link up */
35 u64 fip_linkdns; /* FIP link down */
36 u64 fip_fails; /* FIP failures */
37 u64 mac_invalids; /* Invalid mac assignments */
38 u64 vlan_req; /* Vlan requests */
39 u64 vlan_notify; /* Vlan notifications */
40 u64 vlan_err; /* Vlan notification errors */
41 u64 vlan_timeouts; /* Vlan request timeouts */
42 u64 vlan_invalids; /* Vlan invalids */
43 u64 disc_req; /* Discovery requests */
44 u64 disc_rsp; /* Discovery responses */
45 u64 disc_err; /* Discovery error frames */
46 u64 disc_unsol; /* Discovery unsolicited */
47 u64 disc_timeouts; /* Discovery timeouts */
48 u64 disc_fcf_unavail; /* Discovery FCF not avail */
49 u64 linksvc_unsupp; /* FIP link service req unsupp. */
50 u64 linksvc_err; /* FIP link service req errors */
51 u64 logo_req; /* FIP logo */
52 u64 clrvlink_req; /* Clear virtual link requests */
53 u64 op_unsupp; /* FIP operation unsupp. */
54 u64 untagged; /* FIP untagged frames */
55 u64 txf_ucast; /* Tx FCoE unicast frames */
56 u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
57 u64 txf_ucast_octets; /* Tx FCoE unicast octets */
58 u64 txf_mcast; /* Tx FCoE mutlicast frames */
59 u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */
60 u64 txf_mcast_octets; /* Tx FCoE multicast octets */
61 u64 txf_bcast; /* Tx FCoE broadcast frames */
62 u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
63 u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
64 u64 txf_timeout; /* Tx timeouts */
65 u64 txf_parity_errors; /* Transmit parity err */
66 u64 txf_fid_parity_errors; /* Transmit FID parity err */
67 u64 tx_pause; /* Tx pause frames */
68 u64 tx_zero_pause; /* Tx zero pause frames */
69 u64 tx_first_pause; /* Tx first pause frames */
70 u64 rx_pause; /* Rx pause frames */
71 u64 rx_zero_pause; /* Rx zero pause frames */
72 u64 rx_first_pause; /* Rx first pause frames */
73 u64 rxf_ucast_octets; /* Rx unicast octets */
74 u64 rxf_ucast; /* Rx unicast frames */
75 u64 rxf_ucast_vlan; /* Rx unicast vlan frames */
76 u64 rxf_mcast_octets; /* Rx multicast octets */
77 u64 rxf_mcast; /* Rx multicast frames */
78 u64 rxf_mcast_vlan; /* Rx multicast vlan frames */
79 u64 rxf_bcast_octets; /* Rx broadcast octests */
80 u64 rxf_bcast; /* Rx broadcast frames */
81 u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */
82};
83
84/**
85 * QoS or FCoE stats (fcport stats excluding physical FC port stats)
86 */
87union bfa_fcport_stats_u {
88 struct bfa_qos_stats_s fcqos;
89 struct bfa_fcoe_stats_s fcoe;
90};
91
92#pragma pack()
93
94#endif /* __BFA_DEFS_FCPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
deleted file mode 100644
index 9ccf53bef65a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_COMMON_H__
19#define __BFA_DEFS_IM_COMMON_H__
20
21#define BFA_ADAPTER_NAME_LEN 256
22#define BFA_ADAPTER_GUID_LEN 256
23#define RESERVED_VLAN_NAME L"PORT VLAN"
24#define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN"
25
26 u64 tx_pkt_cnt;
27 u64 rx_pkt_cnt;
28 u32 duration;
29 u8 status;
30} bfa_im_stats_t, *pbfa_im_stats_t;
31
32#endif /* __BFA_DEFS_IM_COMMON_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
deleted file mode 100644
index a486a7eb81d6..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#ifndef __BFA_DEFS_IM_TEAM_H__
19#define __BFA_DEFS_IM_TEAM_H__
20
21#include <protocol/types.h>
22
23#define BFA_TEAM_MAX_PORTS 8
24#define BFA_TEAM_NAME_LEN 256
25#define BFA_MAX_NUM_TEAMS 16
26#define BFA_TEAM_INVALID_DELAY -1
27
28 BFA_LACP_RATE_SLOW = 1,
29 BFA_LACP_RATE_FAST
30} bfa_im_lacp_rate_t;
31
32 BFA_TEAM_MODE_FAIL_OVER = 1,
33 BFA_TEAM_MODE_FAIL_BACK,
34 BFA_TEAM_MODE_LACP,
35 BFA_TEAM_MODE_NONE
36} bfa_im_team_mode_t;
37
38 BFA_XMIT_POLICY_L2 = 1,
39 BFA_XMIT_POLICY_L3_L4
40} bfa_im_xmit_policy_t;
41
42 bfa_im_team_mode_t team_mode;
43 bfa_im_lacp_rate_t lacp_rate;
44 bfa_im_xmit_policy_t xmit_policy;
45 int delay;
46 wchar_t primary[BFA_ADAPTER_NAME_LEN];
47 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
48 mac_t mac;
49 u16 num_ports;
50 u16 num_vlans;
51 u16 vlan_list[BFA_MAX_VLANS_PER_PORT];
52 wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN];
53 wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN];
54} bfa_im_team_attr_t;
55
56 wchar_t team_name[BFA_TEAM_NAME_LEN];
57 bfa_im_xmit_policy_t xmit_policy;
58 int delay;
59 wchar_t primary[BFA_ADAPTER_NAME_LEN];
60 wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
61} bfa_im_team_edit_t, *pbfa_im_team_edit_t;
62
63 wchar_t team_name[BFA_TEAM_NAME_LEN];
64 bfa_im_team_mode_t team_mode;
65 mac_t mac;
66} bfa_im_team_info_t;
67
68 bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS];
69 u16 num_teams;
70} bfa_im_team_list_t, *pbfa_im_team_list_t;
71
72#endif /* __BFA_DEFS_IM_TEAM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
index b1d532da3a9d..8d8e6a966537 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -126,6 +126,7 @@ struct bfa_ioc_attr_s {
126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ 126 struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
127 struct bfa_ioc_pci_attr_s pci_attr; 127 struct bfa_ioc_pci_attr_s pci_attr;
128 u8 port_id; /* port number */ 128 u8 port_id; /* port number */
129 u8 rsvd[7]; /*!< 64bit align */
129}; 130};
130 131
131/** 132/**
@@ -143,8 +144,8 @@ enum bfa_ioc_aen_event {
143 * BFA IOC level event data, now just a place holder 144 * BFA IOC level event data, now just a place holder
144 */ 145 */
145struct bfa_ioc_aen_data_s { 146struct bfa_ioc_aen_data_s {
146 enum bfa_ioc_type_e ioc_type;
147 wwn_t pwwn; 147 wwn_t pwwn;
148 s16 ioc_type;
148 mac_t mac; 149 mac_t mac;
149}; 150};
150 151
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
index d76bcbd9820f..c290fb13d2d1 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -26,6 +26,8 @@
26 26
27#define BFA_IOCFC_INTR_DELAY 1125 27#define BFA_IOCFC_INTR_DELAY 1125
28#define BFA_IOCFC_INTR_LATENCY 225 28#define BFA_IOCFC_INTR_LATENCY 225
29#define BFA_IOCFCOE_INTR_DELAY 25
30#define BFA_IOCFCOE_INTR_LATENCY 5
29 31
30/** 32/**
31 * Interrupt coalescing configuration. 33 * Interrupt coalescing configuration.
@@ -50,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
50 u16 num_fcxp_reqs; /* unassisted FC exchanges */ 52 u16 num_fcxp_reqs; /* unassisted FC exchanges */
51 u16 num_uf_bufs; /* unsolicited recv buffers */ 53 u16 num_uf_bufs; /* unsolicited recv buffers */
52 u8 num_cqs; 54 u8 num_cqs;
53 u8 rsvd; 55 u8 rsvd[5];
54}; 56};
55 57
56struct bfa_iocfc_drvcfg_s { 58struct bfa_iocfc_drvcfg_s {
@@ -224,18 +226,24 @@ struct bfa_fw_port_physm_stats_s {
224 226
225 227
226struct bfa_fw_fip_stats_s { 228struct bfa_fw_fip_stats_s {
229 u32 vlan_req; /* vlan discovery requests */
230 u32 vlan_notify; /* vlan notifications */
231 u32 vlan_err; /* vlan response error */
232 u32 vlan_timeouts; /* vlan disvoery timeouts */
233 u32 vlan_invalids; /* invalid vlan in discovery advert. */
227 u32 disc_req; /* Discovery solicit requests */ 234 u32 disc_req; /* Discovery solicit requests */
228 u32 disc_rsp; /* Discovery solicit response */ 235 u32 disc_rsp; /* Discovery solicit response */
229 u32 disc_err; /* Discovery advt. parse errors */ 236 u32 disc_err; /* Discovery advt. parse errors */
230 u32 disc_unsol; /* Discovery unsolicited */ 237 u32 disc_unsol; /* Discovery unsolicited */
231 u32 disc_timeouts; /* Discovery timeouts */ 238 u32 disc_timeouts; /* Discovery timeouts */
239 u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
232 u32 linksvc_unsupp; /* Unsupported link service req */ 240 u32 linksvc_unsupp; /* Unsupported link service req */
233 u32 linksvc_err; /* Parse error in link service req */ 241 u32 linksvc_err; /* Parse error in link service req */
234 u32 logo_req; /* Number of FIP logos received */ 242 u32 logo_req; /* Number of FIP logos received */
235 u32 clrvlink_req; /* Clear virtual link req */ 243 u32 clrvlink_req; /* Clear virtual link req */
236 u32 op_unsupp; /* Unsupported FIP operation */ 244 u32 op_unsupp; /* Unsupported FIP operation */
237 u32 untagged; /* Untagged frames (ignored) */ 245 u32 untagged; /* Untagged frames (ignored) */
238 u32 rsvd; 246 u32 invalid_version; /*!< Invalid FIP version */
239}; 247};
240 248
241 249
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
index 7359f82aacfc..0952a139c47c 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
@@ -59,8 +59,8 @@ enum bfa_lport_aen_event {
59 */ 59 */
60struct bfa_lport_aen_data_s { 60struct bfa_lport_aen_data_s {
61 u16 vf_id; /* vf_id of this logical port */ 61 u16 vf_id; /* vf_id of this logical port */
62 u16 rsvd; 62 s16 roles; /* Logical port mode,IM/TM/IP etc */
63 enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */ 63 u32 rsvd;
64 wwn_t ppwwn; /* WWN of its physical port */ 64 wwn_t ppwwn; /* WWN of its physical port */
65 wwn_t lpwwn; /* WWN of this logical port */ 65 wwn_t lpwwn; /* WWN of this logical port */
66}; 66};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
index 13fd4ab6aae2..c5bd9c36ad4d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -22,7 +22,47 @@
22/** 22/**
23 * Manufacturing block version 23 * Manufacturing block version
24 */ 24 */
25#define BFA_MFG_VERSION 1 25#define BFA_MFG_VERSION 2
26
27/**
28 * Manufacturing block encrypted version
29 */
30#define BFA_MFG_ENC_VER 2
31
32/**
33 * Manufacturing block version 1 length
34 */
35#define BFA_MFG_VER1_LEN 128
36
37/**
38 * Manufacturing block header length
39 */
40#define BFA_MFG_HDR_LEN 4
41
42/**
43 * Checksum size
44 */
45#define BFA_MFG_CHKSUM_SIZE 16
46
47/**
48 * Manufacturing block encrypted version
49 */
50#define BFA_MFG_ENC_VER 2
51
52/**
53 * Manufacturing block version 1 length
54 */
55#define BFA_MFG_VER1_LEN 128
56
57/**
58 * Manufacturing block header length
59 */
60#define BFA_MFG_HDR_LEN 4
61
62/**
63 * Checksum size
64 */
65#define BFA_MFG_CHKSUM_SIZE 16
26 66
27/** 67/**
28 * Manufacturing block format 68 * Manufacturing block format
@@ -30,29 +70,74 @@
30#define BFA_MFG_SERIALNUM_SIZE 11 70#define BFA_MFG_SERIALNUM_SIZE 11
31#define BFA_MFG_PARTNUM_SIZE 14 71#define BFA_MFG_PARTNUM_SIZE 14
32#define BFA_MFG_SUPPLIER_ID_SIZE 10 72#define BFA_MFG_SUPPLIER_ID_SIZE 10
33#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 73#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
34#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 74#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
35#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 75#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
36#define STRSZ(_n) (((_n) + 4) & ~3) 76#define STRSZ(_n) (((_n) + 4) & ~3)
37 77
38/** 78/**
79 * Manufacturing card type
80 */
81enum {
82 BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
83 BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
84 BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
85 BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
86 BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
87 BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
88 BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
89};
90
91#pragma pack(1)
92
93/**
94 * Card type to port number conversion
95 */
96#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
97
98
99/**
100 * All numerical fields are in big-endian format.
101 */
102struct bfa_mfg_block_s {
103};
104
105/**
39 * VPD data length 106 * VPD data length
40 */ 107 */
41#define BFA_MFG_VPD_LEN 256 108#define BFA_MFG_VPD_LEN 512
109
110#define BFA_MFG_VPD_PCI_HDR_OFF 137
111#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */
112#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */
113
114/**
115 * VPD vendor tag
116 */
117enum {
118 BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
119 BFA_MFG_VPD_IBM = 1, /* vendor IBM */
120 BFA_MFG_VPD_HP = 2, /* vendor HP */
121 BFA_MFG_VPD_DELL = 3, /* vendor DELL */
122 BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
123 BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
124 BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
125 BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
126};
42 127
43/** 128/**
44 * All numerical fields are in big-endian format. 129 * All numerical fields are in big-endian format.
45 */ 130 */
46struct bfa_mfg_vpd_s { 131struct bfa_mfg_vpd_s {
47 u8 version; /* vpd data version */ 132 u8 version; /* vpd data version */
48 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ 133 u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
49 u8 chksum; /* u8 checksum */ 134 u8 chksum; /* u8 checksum */
50 u8 vendor; /* vendor */ 135 u8 vendor; /* vendor */
51 u8 len; /* vpd data length excluding header */ 136 u8 len; /* vpd data length excluding header */
52 u8 rsv; 137 u8 rsv;
53 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ 138 u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
54}; 139};
55 140
56#pragma pack(1) 141#pragma pack()
57 142
58#endif /* __BFA_DEFS_MFG_H__ */ 143#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
index de0696c81bc4..501bc9739d9d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -185,6 +185,8 @@ struct bfa_port_attr_s {
185 wwn_t fabric_name; /* attached switch's nwwn */ 185 wwn_t fabric_name; /* attached switch's nwwn */
186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached 186 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
187 * fabric's ip addr */ 187 * fabric's ip addr */
188 struct mac_s fpma_mac; /* Lport's FPMA Mac address */
189 u16 authfail; /* auth failed state */
188}; 190};
189 191
190/** 192/**
@@ -232,14 +234,15 @@ enum bfa_port_aen_sfp_pom {
232}; 234};
233 235
234struct bfa_port_aen_data_s { 236struct bfa_port_aen_data_s {
235 enum bfa_ioc_type_e ioc_type; 237 wwn_t pwwn; /* WWN of the physical port */
236 wwn_t pwwn; /* WWN of the physical port */ 238 wwn_t fwwn; /* WWN of the fabric port */
237 wwn_t fwwn; /* WWN of the fabric port */ 239 s32 phy_port_num; /*! For SFP related events */
238 mac_t mac; /* MAC addres of the ethernet port, 240 s16 ioc_type;
239 * applicable to CNA port only */ 241 s16 level; /* Only transitions will
240 int phy_port_num; /*! For SFP related events */ 242 * be informed */
241 enum bfa_port_aen_sfp_pom level; /* Only transitions will 243 struct mac_s mac; /* MAC address of the ethernet port,
242 * be informed */ 244 * applicable to CNA port only */
245 s16 rsvd;
243}; 246};
244 247
245#endif /* __BFA_DEFS_PORT_H__ */ 248#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
index bf320412ee24..26e5cc78095d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -232,7 +232,7 @@ struct bfa_pport_attr_s {
232 u32 pid; /* port ID */ 232 u32 pid; /* port ID */
233 enum bfa_pport_type port_type; /* current topology */ 233 enum bfa_pport_type port_type; /* current topology */
234 u32 loopback; /* external loopback */ 234 u32 loopback; /* external loopback */
235 u32 rsvd1; 235 u32 authfail; /* auth fail state */
236 u32 rsvd2; /* padding for 64 bit */ 236 u32 rsvd2; /* padding for 64 bit */
237}; 237};
238 238
@@ -240,73 +240,79 @@ struct bfa_pport_attr_s {
240 * FC Port statistics. 240 * FC Port statistics.
241 */ 241 */
242struct bfa_pport_fc_stats_s { 242struct bfa_pport_fc_stats_s {
243 u64 secs_reset; /* seconds since stats is reset */ 243 u64 secs_reset; /* Seconds since stats is reset */
244 u64 tx_frames; /* transmitted frames */ 244 u64 tx_frames; /* Tx frames */
245 u64 tx_words; /* transmitted words */ 245 u64 tx_words; /* Tx words */
246 u64 rx_frames; /* received frames */ 246 u64 tx_lip; /* TX LIP */
247 u64 rx_words; /* received words */ 247 u64 tx_nos; /* Tx NOS */
248 u64 lip_count; /* LIPs seen */ 248 u64 tx_ols; /* Tx OLS */
249 u64 nos_count; /* NOS count */ 249 u64 tx_lr; /* Tx LR */
250 u64 error_frames; /* errored frames (sent?) */ 250 u64 tx_lrr; /* Tx LRR */
251 u64 dropped_frames; /* dropped frames */ 251 u64 rx_frames; /* Rx frames */
252 u64 link_failures; /* link failure count */ 252 u64 rx_words; /* Rx words */
253 u64 loss_of_syncs; /* loss of sync count */ 253 u64 lip_count; /* Rx LIP */
254 u64 loss_of_signals;/* loss of signal count */ 254 u64 nos_count; /* Rx NOS */
255 u64 primseq_errs; /* primitive sequence protocol */ 255 u64 ols_count; /* Rx OLS */
256 u64 bad_os_count; /* invalid ordered set */ 256 u64 lr_count; /* Rx LR */
257 u64 err_enc_out; /* Encoding error outside frame */ 257 u64 lrr_count; /* Rx LRR */
258 u64 invalid_crcs; /* frames received with invalid CRC*/ 258 u64 invalid_crcs; /* Rx CRC err frames */
259 u64 undersized_frm; /* undersized frames */ 259 u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
260 u64 oversized_frm; /* oversized frames */ 260 u64 undersized_frm; /* Rx undersized frames */
261 u64 bad_eof_frm; /* frames with bad EOF */ 261 u64 oversized_frm; /* Rx oversized frames */
262 struct bfa_qos_stats_s qos_stats; /* QoS statistics */ 262 u64 bad_eof_frm; /* Rx frames with bad EOF */
263 u64 error_frames; /* Errored frames */
264 u64 dropped_frames; /* Dropped frames */
265 u64 link_failures; /* Link Failure (LF) count */
266 u64 loss_of_syncs; /* Loss of sync count */
267 u64 loss_of_signals;/* Loss of signal count */
268 u64 primseq_errs; /* Primitive sequence protocol err. */
269 u64 bad_os_count; /* Invalid ordered sets */
270 u64 err_enc_out; /* Encoding err nonframe_8b10b */
271 u64 err_enc; /* Encoding err frame_8b10b */
263}; 272};
264 273
265/** 274/**
266 * Eth Port statistics. 275 * Eth Port statistics.
267 */ 276 */
268struct bfa_pport_eth_stats_s { 277struct bfa_pport_eth_stats_s {
269 u64 secs_reset; /* seconds since stats is reset */ 278 u64 secs_reset; /* Seconds since stats is reset */
270 u64 frame_64; /* both rx and tx counter */ 279 u64 frame_64; /* Frames 64 bytes */
271 u64 frame_65_127; /* both rx and tx counter */ 280 u64 frame_65_127; /* Frames 65-127 bytes */
272 u64 frame_128_255; /* both rx and tx counter */ 281 u64 frame_128_255; /* Frames 128-255 bytes */
273 u64 frame_256_511; /* both rx and tx counter */ 282 u64 frame_256_511; /* Frames 256-511 bytes */
274 u64 frame_512_1023; /* both rx and tx counter */ 283 u64 frame_512_1023; /* Frames 512-1023 bytes */
275 u64 frame_1024_1518; /* both rx and tx counter */ 284 u64 frame_1024_1518; /* Frames 1024-1518 bytes */
276 u64 frame_1519_1522; /* both rx and tx counter */ 285 u64 frame_1519_1522; /* Frames 1519-1522 bytes */
277 286 u64 tx_bytes; /* Tx bytes */
278 u64 tx_bytes; 287 u64 tx_packets; /* Tx packets */
279 u64 tx_packets; 288 u64 tx_mcast_packets; /* Tx multicast packets */
280 u64 tx_mcast_packets; 289 u64 tx_bcast_packets; /* Tx broadcast packets */
281 u64 tx_bcast_packets; 290 u64 tx_control_frame; /* Tx control frame */
282 u64 tx_control_frame; 291 u64 tx_drop; /* Tx drops */
283 u64 tx_drop; 292 u64 tx_jabber; /* Tx jabber */
284 u64 tx_jabber; 293 u64 tx_fcs_error; /* Tx FCS error */
285 u64 tx_fcs_error; 294 u64 tx_fragments; /* Tx fragments */
286 u64 tx_fragments; 295 u64 rx_bytes; /* Rx bytes */
287 296 u64 rx_packets; /* Rx packets */
288 u64 rx_bytes; 297 u64 rx_mcast_packets; /* Rx multicast packets */
289 u64 rx_packets; 298 u64 rx_bcast_packets; /* Rx broadcast packets */
290 u64 rx_mcast_packets; 299 u64 rx_control_frames; /* Rx control frames */
291 u64 rx_bcast_packets; 300 u64 rx_unknown_opcode; /* Rx unknown opcode */
292 u64 rx_control_frames; 301 u64 rx_drop; /* Rx drops */
293 u64 rx_unknown_opcode; 302 u64 rx_jabber; /* Rx jabber */
294 u64 rx_drop; 303 u64 rx_fcs_error; /* Rx FCS errors */
295 u64 rx_jabber; 304 u64 rx_alignment_error; /* Rx alignment errors */
296 u64 rx_fcs_error; 305 u64 rx_frame_length_error; /* Rx frame len errors */
297 u64 rx_alignment_error; 306 u64 rx_code_error; /* Rx code errors */
298 u64 rx_frame_length_error; 307 u64 rx_fragments; /* Rx fragments */
299 u64 rx_code_error; 308 u64 rx_pause; /* Rx pause */
300 u64 rx_fragments; 309 u64 rx_zero_pause; /* Rx zero pause */
301 310 u64 tx_pause; /* Tx pause */
302 u64 rx_pause; /* BPC */ 311 u64 tx_zero_pause; /* Tx zero pause */
303 u64 rx_zero_pause; /* BPC Pause cancellation */ 312 u64 rx_fcoe_pause; /* Rx fcoe pause */
304 u64 tx_pause; /* BPC */ 313 u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
305 u64 tx_zero_pause; /* BPC Pause cancellation */ 314 u64 tx_fcoe_pause; /* Tx FCoE pause */
306 u64 rx_fcoe_pause; /* BPC */ 315 u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
307 u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */
308 u64 tx_fcoe_pause; /* BPC */
309 u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */
310}; 316};
311 317
312/** 318/**
@@ -333,8 +339,7 @@ struct bfa_pport_fcpmap_s {
333}; 339};
334 340
335/** 341/**
336 * Port RNID info. 342 * Port RNI */
337 */
338struct bfa_pport_rnid_s { 343struct bfa_pport_rnid_s {
339 wwn_t wwn; 344 wwn_t wwn;
340 u32 unittype; 345 u32 unittype;
@@ -347,6 +352,23 @@ struct bfa_pport_rnid_s {
347 u16 topologydiscoveryflags; 352 u16 topologydiscoveryflags;
348}; 353};
349 354
355struct bfa_fcport_fcf_s {
356 wwn_t name; /* FCF name */
357 wwn_t fabric_name; /* Fabric Name */
358 u8 fipenabled; /* FIP enabled or not */
359 u8 fipfailed; /* FIP failed or not */
360 u8 resv[2];
361 u8 pri; /* FCF priority */
362 u8 version; /* FIP version used */
363 u8 available; /* Available for login */
364 u8 fka_disabled; /* FKA is disabled */
365 u8 maxsz_verified; /* FCoE max size verified */
366 u8 fc_map[3]; /* FC map */
367 u16 vlan; /* FCoE vlan tag/priority */
368 u32 fka_adv_per; /* FIP ka advert. period */
369 struct mac_s mac; /* FCF mac */
370};
371
350/** 372/**
351 * Link state information 373 * Link state information
352 */ 374 */
@@ -378,6 +400,7 @@ struct bfa_pport_link_s {
378 struct fc_alpabm_s alpabm; /* alpa bitmap */ 400 struct fc_alpabm_s alpabm; /* alpa bitmap */
379 } loop_info; 401 } loop_info;
380 } tl; 402 } tl;
403 struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */
381}; 404};
382 405
383#endif /* __BFA_DEFS_PPORT_H__ */ 406#endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index cdceaeb9f4b8..4374494bd566 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -180,8 +180,8 @@ enum bfa_status {
180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part 180 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
181 * of another team */ 181 * of another team */
182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured. 182 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
183 * Delete all VLANs before 183 * Delete all VLANs to become
184 * creating team */ 184 * part of the team */
185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured 185 BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
186 * for adapters */ 186 * for adapters */
187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds 187 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
@@ -213,7 +213,7 @@ enum bfa_status {
213 * loaded */ 213 * loaded */
214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ 214 BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ 215 BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
216 BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */ 216 BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */
217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */ 217 BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */
218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ 218 BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ 219 BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
@@ -228,8 +228,7 @@ enum bfa_status {
228 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem 228 BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem
229 * handle Failed. Please try 229 * handle Failed. Please try
230 * after some time */ 230 * after some time */
231 BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not 231 BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
232 * Enabled on this port */
233 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient 232 BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
234 * permissions to execute the BCU 233 * permissions to execute the BCU
235 * application */ 234 * application */
@@ -242,6 +241,14 @@ enum bfa_status {
242 * failed */ 241 * failed */
243 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation 242 BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation
244 * failed */ 243 * failed */
244 BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
245 * team */
246 BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */
247 BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't
248 * exists */
249 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
250 * allowed for the current
251 * Teaming mode */
245 BFA_STATUS_MAX_VAL /* Unknown error code */ 252 BFA_STATUS_MAX_VAL /* Unknown error code */
246}; 253};
247#define bfa_status_t enum bfa_status 254#define bfa_status_t enum bfa_status
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
index a6c70aee0aa3..52585d3dd891 100644
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
@@ -70,7 +70,6 @@ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
70 */ 70 */
71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); 71void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
72 72
73void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv);
74void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv); 73void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
75 74
76#endif /* __BFAD_FCB_FCPIM_H__ */ 75#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
index 627669c65546..f2fd35fdee28 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -49,6 +49,7 @@ struct bfa_fcs_s {
49 struct bfa_trc_mod_s *trcmod; /* tracing module */ 49 struct bfa_trc_mod_s *trcmod; /* tracing module */
50 struct bfa_aen_s *aen; /* aen component */ 50 struct bfa_aen_s *aen; /* aen component */
51 bfa_boolean_t vf_enabled; /* VF mode is enabled */ 51 bfa_boolean_t vf_enabled; /* VF mode is enabled */
52 bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */
52 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ 53 bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
53 u16 port_vfid; /* port default VF ID */ 54 u16 port_vfid; /* port default VF ID */
54 struct bfa_fcs_driver_info_s driver_info; 55 struct bfa_fcs_driver_info_s driver_info;
@@ -60,10 +61,12 @@ struct bfa_fcs_s {
60/* 61/*
61 * bfa fcs API functions 62 * bfa fcs API functions
62 */ 63 */
63void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, 64void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
64 bfa_boolean_t min_cfg); 65 bfa_boolean_t min_cfg);
66void bfa_fcs_init(struct bfa_fcs_s *fcs);
65void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 67void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
66 struct bfa_fcs_driver_info_s *driver_info); 68 struct bfa_fcs_driver_info_s *driver_info);
69void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
67void bfa_fcs_exit(struct bfa_fcs_s *fcs); 70void bfa_fcs_exit(struct bfa_fcs_s *fcs);
68void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); 71void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
69void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod); 72void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
index 967ceb0eb074..ceaefd3060f4 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -34,14 +34,6 @@ struct bfa_fcs_s;
34struct bfa_fcs_fabric_s; 34struct bfa_fcs_fabric_s;
35 35
36/* 36/*
37* @todo : need to move to a global config file.
38 * Maximum Vports supported per physical port or vf.
39 */
40#define BFA_FCS_MAX_VPORTS_SUPP_CB 255
41#define BFA_FCS_MAX_VPORTS_SUPP_CT 191
42
43/*
44* @todo : need to move to a global config file.
45 * Maximum Rports supported per port (physical/logical). 37 * Maximum Rports supported per port (physical/logical).
46 */ 38 */
47#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ 39#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
index 0412aea2ec30..5f8f5e30b9e8 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h
@@ -27,4 +27,10 @@
27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3) 27 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
28#define BFA_LOG_HAL_SM_ASSERT \ 28#define BFA_LOG_HAL_SM_ASSERT \
29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4) 29 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
30#define BFA_LOG_HAL_DRIVER_ERROR \
31 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
32#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
33 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
34#define BFA_LOG_HAL_MBOX_ERROR \
35 (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
30#endif 36#endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
index 317c0547ee16..bd451db4c30a 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -41,4 +41,20 @@
41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10) 41 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \ 42#define BFA_LOG_LINUX_SCSI_ABORT_COMP \
43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11) 43 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
44#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
45 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
46#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
47 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
48#define BFA_LOG_LINUX_IOC_ERROR \
49 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
50#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
51 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
52#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
53 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
54#define BFA_LOG_LINUX_DRIVER_ERROR \
55 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
56#define BFA_LOG_LINUX_DRIVER_DIAG \
57 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
58#define BFA_LOG_LINUX_DRIVER_AEN \
59 (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
44#endif 60#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 14969eecf6a9..8d1038035a76 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -50,6 +50,11 @@ struct fchs_s {
50 50
51 u32 ro; /* relative offset */ 51 u32 ro; /* relative offset */
52}; 52};
53
54#define FC_SOF_LEN 4
55#define FC_EOF_LEN 4
56#define FC_CRC_LEN 4
57
53/* 58/*
54 * Fibre Channel BB_E Header Structure 59 * Fibre Channel BB_E Header Structure
55 */ 60 */
diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h
deleted file mode 100644
index 6830dc3ee58a..000000000000
--- a/drivers/scsi/bfa/include/protocol/pcifw.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18/**
19 * pcifw.h PCI FW related headers
20 */
21
22#ifndef __PCIFW_H__
23#define __PCIFW_H__
24
25#pragma pack(1)
26
27struct pnp_hdr_s{
28 u32 signature; /* "$PnP" */
29 u8 rev; /* Struct revision */
30 u8 len; /* Header structure len in multiples
31 * of 16 bytes */
32 u16 off; /* Offset to next header 00 if none */
33 u8 rsvd; /* Reserved byte */
34 u8 cksum; /* 8-bit checksum for this header */
35 u32 pnp_dev_id; /* PnP Device Id */
36 u16 mfstr; /* Pointer to manufacturer string */
37 u16 prstr; /* Pointer to product string */
38 u8 devtype[3]; /* Device Type Code */
39 u8 devind; /* Device Indicator */
40 u16 bcventr; /* Bootstrap entry vector */
41 u16 rsvd2; /* Reserved */
42 u16 sriv; /* Static resource information vector */
43};
44
45struct pci_3_0_ds_s{
46 u32 sig; /* Signature "PCIR" */
47 u16 vendid; /* Vendor ID */
48 u16 devid; /* Device ID */
49 u16 devlistoff; /* Device List Offset */
50 u16 len; /* PCI Data Structure Length */
51 u8 rev; /* PCI Data Structure Revision */
52 u8 clcode[3]; /* Class Code */
53 u16 imglen; /* Code image length in multiples of
54 * 512 bytes */
55 u16 coderev; /* Revision level of code/data */
56 u8 codetype; /* Code type 0x00 - BIOS */
57 u8 indr; /* Last image indicator */
58 u16 mrtimglen; /* Max Run Time Image Length */
59 u16 cuoff; /* Config Utility Code Header Offset */
60 u16 dmtfclp; /* DMTF CLP entry point offset */
61};
62
63struct pci_optrom_hdr_s{
64 u16 sig; /* Signature 0x55AA */
65 u8 len; /* Option ROM length in units of 512 bytes */
66 u8 inivec[3]; /* Initialization vector */
67 u8 rsvd[16]; /* Reserved field */
68 u16 verptr; /* Pointer to version string - private */
69 u16 pcids; /* Pointer to PCI data structure */
70 u16 pnphdr; /* Pointer to PnP expansion header */
71};
72
73#pragma pack()
74
75#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
index f7c7f4f3c640..f6342efb6a90 100644
--- a/drivers/scsi/bfa/loop.c
+++ b/drivers/scsi/bfa/loop.c
@@ -162,7 +162,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, 162 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
163 bfa_fcs_port_get_fcid(port), 0, 163 bfa_fcs_port_get_fcid(port), 0,
164 port->port_cfg.pwwn, port->port_cfg.nwwn, 164 port->port_cfg.pwwn, port->port_cfg.nwwn,
165 bfa_pport_get_maxfrsize(port->fcs->bfa)); 165 bfa_fcport_get_maxfrsize(port->fcs->bfa));
166 166
167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 167 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
168 FC_CLASS_3, len, &fchs, 168 FC_CLASS_3, len, &fchs,
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index 1e06792cd4c2..d3907d184e2b 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -156,7 +156,7 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
156 /* 156 /*
157 * Get Physical port's current speed 157 * Get Physical port's current speed
158 */ 158 */
159 bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 159 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
160 pport_speed = pport_attr.speed; 160 pport_speed = pport_attr.speed;
161 bfa_trc(fcs, pport_speed); 161 bfa_trc(fcs, pport_speed);
162 162
@@ -235,7 +235,8 @@ bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
235 port_info->port_wwn = bfa_fcs_port_get_pwwn(port); 235 port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
236 port_info->node_wwn = bfa_fcs_port_get_nwwn(port); 236 port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
237 237
238 port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs); 238 port_info->max_vports_supp =
239 bfa_lps_get_max_vport(port->fcs->bfa);
239 port_info->num_vports_inuse = 240 port_info->num_vports_inuse =
240 bfa_fcs_fabric_vport_count(port->fabric); 241 bfa_fcs_fabric_vport_count(port->fabric);
241 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; 242 port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
index c96b3ca007ae..5e8c8dee6c97 100644
--- a/drivers/scsi/bfa/ms.c
+++ b/drivers/scsi/bfa/ms.c
@@ -118,7 +118,7 @@ bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
118 break; 118 break;
119 119
120 default: 120 default:
121 bfa_assert(0); 121 bfa_sm_fault(ms->port->fcs, event);
122 } 122 }
123} 123}
124 124
@@ -141,7 +141,7 @@ bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
141 break; 141 break;
142 142
143 default: 143 default:
144 bfa_assert(0); 144 bfa_sm_fault(ms->port->fcs, event);
145 } 145 }
146} 146}
147 147
@@ -190,7 +190,7 @@ bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
190 break; 190 break;
191 191
192 default: 192 default:
193 bfa_assert(0); 193 bfa_sm_fault(ms->port->fcs, event);
194 } 194 }
195} 195}
196 196
@@ -216,7 +216,7 @@ bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
216 break; 216 break;
217 217
218 default: 218 default:
219 bfa_assert(0); 219 bfa_sm_fault(ms->port->fcs, event);
220 } 220 }
221} 221}
222 222
@@ -230,10 +230,6 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
230 switch (event) { 230 switch (event) {
231 case MSSM_EVENT_PORT_OFFLINE: 231 case MSSM_EVENT_PORT_OFFLINE:
232 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); 232 bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
233 /*
234 * now invoke MS related sub-modules
235 */
236 bfa_fcs_port_fdmi_offline(ms);
237 break; 233 break;
238 234
239 case MSSM_EVENT_PORT_FABRIC_RSCN: 235 case MSSM_EVENT_PORT_FABRIC_RSCN:
@@ -243,7 +239,7 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
243 break; 239 break;
244 240
245 default: 241 default:
246 bfa_assert(0); 242 bfa_sm_fault(ms->port->fcs, event);
247 } 243 }
248} 244}
249 245
@@ -266,7 +262,7 @@ bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
266 break; 262 break;
267 263
268 default: 264 default:
269 bfa_assert(0); 265 bfa_sm_fault(ms->port->fcs, event);
270 } 266 }
271} 267}
272 268
@@ -304,7 +300,7 @@ bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
304 break; 300 break;
305 301
306 default: 302 default:
307 bfa_assert(0); 303 bfa_sm_fault(ms->port->fcs, event);
308 } 304 }
309} 305}
310 306
@@ -330,7 +326,7 @@ bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
330 break; 326 break;
331 327
332 default: 328 default:
333 bfa_assert(0); 329 bfa_sm_fault(ms->port->fcs, event);
334 } 330 }
335} 331}
336 332
@@ -466,7 +462,7 @@ bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
466 break; 462 break;
467 463
468 default: 464 default:
469 bfa_assert(0); 465 bfa_sm_fault(ms->port->fcs, event);
470 } 466 }
471} 467}
472 468
@@ -502,7 +498,7 @@ bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
502 break; 498 break;
503 499
504 default: 500 default:
505 bfa_assert(0); 501 bfa_sm_fault(ms->port->fcs, event);
506 } 502 }
507} 503}
508 504
@@ -528,7 +524,7 @@ bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
528 break; 524 break;
529 525
530 default: 526 default:
531 bfa_assert(0); 527 bfa_sm_fault(ms->port->fcs, event);
532 } 528 }
533} 529}
534 530
@@ -637,7 +633,7 @@ bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
637 bfa_os_hton3b(FC_MGMT_SERVER), 633 bfa_os_hton3b(FC_MGMT_SERVER),
638 bfa_fcs_port_get_fcid(port), 0, 634 bfa_fcs_port_get_fcid(port), 0,
639 port->port_cfg.pwwn, port->port_cfg.nwwn, 635 port->port_cfg.pwwn, port->port_cfg.nwwn,
640 bfa_pport_get_maxfrsize(port->fcs->bfa)); 636 bfa_fcport_get_maxfrsize(port->fcs->bfa));
641 637
642 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 638 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
643 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, 639 FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
@@ -735,6 +731,7 @@ bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
735 731
736 ms->port = port; 732 ms->port = port;
737 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); 733 bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
734 bfa_fcs_port_fdmi_offline(ms);
738} 735}
739 736
740void 737void
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index 2f8b880060bb..d20dd7e15742 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -164,7 +164,7 @@ bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
164 break; 164 break;
165 165
166 default: 166 default:
167 bfa_assert(0); 167 bfa_sm_fault(ns->port->fcs, event);
168 } 168 }
169} 169}
170 170
@@ -187,7 +187,7 @@ bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
187 break; 187 break;
188 188
189 default: 189 default:
190 bfa_assert(0); 190 bfa_sm_fault(ns->port->fcs, event);
191 } 191 }
192} 192}
193 193
@@ -221,7 +221,7 @@ bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
221 break; 221 break;
222 222
223 default: 223 default:
224 bfa_assert(0); 224 bfa_sm_fault(ns->port->fcs, event);
225 } 225 }
226} 226}
227 227
@@ -247,7 +247,7 @@ bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
247 break; 247 break;
248 248
249 default: 249 default:
250 bfa_assert(0); 250 bfa_sm_fault(ns->port->fcs, event);
251 } 251 }
252} 252}
253 253
@@ -270,7 +270,7 @@ bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
270 break; 270 break;
271 271
272 default: 272 default:
273 bfa_assert(0); 273 bfa_sm_fault(ns->port->fcs, event);
274 } 274 }
275} 275}
276 276
@@ -304,7 +304,7 @@ bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
304 break; 304 break;
305 305
306 default: 306 default:
307 bfa_assert(0); 307 bfa_sm_fault(ns->port->fcs, event);
308 } 308 }
309} 309}
310 310
@@ -330,7 +330,7 @@ bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
330 break; 330 break;
331 331
332 default: 332 default:
333 bfa_assert(0); 333 bfa_sm_fault(ns->port->fcs, event);
334 } 334 }
335} 335}
336 336
@@ -353,7 +353,7 @@ bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
353 break; 353 break;
354 354
355 default: 355 default:
356 bfa_assert(0); 356 bfa_sm_fault(ns->port->fcs, event);
357 } 357 }
358} 358}
359 359
@@ -390,7 +390,7 @@ bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
390 break; 390 break;
391 391
392 default: 392 default:
393 bfa_assert(0); 393 bfa_sm_fault(ns->port->fcs, event);
394 } 394 }
395} 395}
396 396
@@ -413,7 +413,7 @@ bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
413 break; 413 break;
414 414
415 default: 415 default:
416 bfa_assert(0); 416 bfa_sm_fault(ns->port->fcs, event);
417 } 417 }
418} 418}
419 419
@@ -436,7 +436,7 @@ bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
436 break; 436 break;
437 437
438 default: 438 default:
439 bfa_assert(0); 439 bfa_sm_fault(ns->port->fcs, event);
440 } 440 }
441} 441}
442 442
@@ -494,7 +494,7 @@ bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
494 break; 494 break;
495 495
496 default: 496 default:
497 bfa_assert(0); 497 bfa_sm_fault(ns->port->fcs, event);
498 } 498 }
499} 499}
500 500
@@ -517,7 +517,7 @@ bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
517 break; 517 break;
518 518
519 default: 519 default:
520 bfa_assert(0); 520 bfa_sm_fault(ns->port->fcs, event);
521 } 521 }
522} 522}
523static void 523static void
@@ -539,7 +539,7 @@ bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
539 break; 539 break;
540 540
541 default: 541 default:
542 bfa_assert(0); 542 bfa_sm_fault(ns->port->fcs, event);
543 } 543 }
544} 544}
545 545
@@ -575,7 +575,7 @@ bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
575 break; 575 break;
576 576
577 default: 577 default:
578 bfa_assert(0); 578 bfa_sm_fault(ns->port->fcs, event);
579 } 579 }
580} 580}
581 581
@@ -598,7 +598,7 @@ bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
598 break; 598 break;
599 599
600 default: 600 default:
601 bfa_assert(0); 601 bfa_sm_fault(ns->port->fcs, event);
602 } 602 }
603} 603}
604 604
@@ -626,7 +626,7 @@ bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
626 break; 626 break;
627 627
628 default: 628 default:
629 bfa_assert(0); 629 bfa_sm_fault(ns->port->fcs, event);
630 } 630 }
631} 631}
632 632
@@ -660,7 +660,7 @@ bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
660 bfa_os_hton3b(FC_NAME_SERVER), 660 bfa_os_hton3b(FC_NAME_SERVER),
661 bfa_fcs_port_get_fcid(port), 0, 661 bfa_fcs_port_get_fcid(port), 0,
662 port->port_cfg.pwwn, port->port_cfg.nwwn, 662 port->port_cfg.pwwn, port->port_cfg.nwwn,
663 bfa_pport_get_maxfrsize(port->fcs->bfa)); 663 bfa_fcport_get_maxfrsize(port->fcs->bfa));
664 664
665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 665 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, 666 FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
index 9cf58bb138dc..8e73dd9a625a 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/rport.c
@@ -224,7 +224,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
224 break; 224 break;
225 225
226 default: 226 default:
227 bfa_assert(0); 227 bfa_sm_fault(rport->fcs, event);
228 } 228 }
229} 229}
230 230
@@ -276,7 +276,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
276 break; 276 break;
277 277
278 default: 278 default:
279 bfa_assert(0); 279 bfa_sm_fault(rport->fcs, event);
280 } 280 }
281} 281}
282 282
@@ -332,7 +332,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
332 break; 332 break;
333 333
334 default: 334 default:
335 bfa_assert(0); 335 bfa_sm_fault(rport->fcs, event);
336 } 336 }
337} 337}
338 338
@@ -406,7 +406,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
406 break; 406 break;
407 407
408 default: 408 default:
409 bfa_assert(0); 409 bfa_sm_fault(rport->fcs, event);
410 } 410 }
411} 411}
412 412
@@ -481,7 +481,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
481 break; 481 break;
482 482
483 default: 483 default:
484 bfa_assert(0); 484 bfa_sm_fault(rport->fcs, event);
485 } 485 }
486} 486}
487 487
@@ -534,7 +534,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
534 break; 534 break;
535 535
536 default: 536 default:
537 bfa_assert(0); 537 bfa_sm_fault(rport->fcs, event);
538 } 538 }
539} 539}
540 540
@@ -589,7 +589,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
589 break; 589 break;
590 590
591 default: 591 default:
592 bfa_assert(0); 592 bfa_sm_fault(rport->fcs, event);
593 } 593 }
594} 594}
595 595
@@ -646,7 +646,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
646 break; 646 break;
647 647
648 default: 648 default:
649 bfa_assert(0); 649 bfa_sm_fault(rport->fcs, event);
650 } 650 }
651} 651}
652 652
@@ -704,7 +704,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
704 break; 704 break;
705 705
706 default: 706 default:
707 bfa_assert(0); 707 bfa_sm_fault(rport->fcs, event);
708 } 708 }
709} 709}
710 710
@@ -754,7 +754,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
754 break; 754 break;
755 755
756 default: 756 default:
757 bfa_assert(0); 757 bfa_sm_fault(rport->fcs, event);
758 } 758 }
759} 759}
760 760
@@ -816,7 +816,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
816 break; 816 break;
817 817
818 default: 818 default:
819 bfa_assert(0); 819 bfa_sm_fault(rport->fcs, event);
820 } 820 }
821} 821}
822 822
@@ -846,7 +846,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
846 break; 846 break;
847 847
848 default: 848 default:
849 bfa_assert(0); 849 bfa_sm_fault(rport->fcs, event);
850 } 850 }
851} 851}
852 852
@@ -869,7 +869,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
869 break; 869 break;
870 870
871 default: 871 default:
872 bfa_assert(0); 872 bfa_sm_fault(rport->fcs, event);
873 } 873 }
874} 874}
875 875
@@ -905,7 +905,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
905 break; 905 break;
906 906
907 default: 907 default:
908 bfa_assert(0); 908 bfa_sm_fault(rport->fcs, event);
909 } 909 }
910} 910}
911 911
@@ -925,10 +925,17 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
925 case RPSM_EVENT_HCB_OFFLINE: 925 case RPSM_EVENT_HCB_OFFLINE:
926 case RPSM_EVENT_ADDRESS_CHANGE: 926 case RPSM_EVENT_ADDRESS_CHANGE:
927 if (bfa_fcs_port_is_online(rport->port)) { 927 if (bfa_fcs_port_is_online(rport->port)) {
928 bfa_sm_set_state(rport, 928 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
929 bfa_fcs_rport_sm_nsdisc_sending); 929 bfa_sm_set_state(rport,
930 rport->ns_retries = 0; 930 bfa_fcs_rport_sm_nsdisc_sending);
931 bfa_fcs_rport_send_gidpn(rport, NULL); 931 rport->ns_retries = 0;
932 bfa_fcs_rport_send_gidpn(rport, NULL);
933 } else {
934 bfa_sm_set_state(rport,
935 bfa_fcs_rport_sm_plogi_sending);
936 rport->plogi_retries = 0;
937 bfa_fcs_rport_send_plogi(rport, NULL);
938 }
932 } else { 939 } else {
933 rport->pid = 0; 940 rport->pid = 0;
934 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 941 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
@@ -951,7 +958,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
951 break; 958 break;
952 959
953 default: 960 default:
954 bfa_assert(0); 961 bfa_sm_fault(rport->fcs, event);
955 } 962 }
956} 963}
957 964
@@ -1011,7 +1018,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1011 break; 1018 break;
1012 1019
1013 default: 1020 default:
1014 bfa_assert(0); 1021 bfa_sm_fault(rport->fcs, event);
1015 } 1022 }
1016} 1023}
1017 1024
@@ -1038,7 +1045,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1038 break; 1045 break;
1039 1046
1040 default: 1047 default:
1041 bfa_assert(0); 1048 bfa_sm_fault(rport->fcs, event);
1042 } 1049 }
1043} 1050}
1044 1051
@@ -1073,7 +1080,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1073 break; 1080 break;
1074 1081
1075 default: 1082 default:
1076 bfa_assert(0); 1083 bfa_sm_fault(rport->fcs, event);
1077 } 1084 }
1078} 1085}
1079 1086
@@ -1132,7 +1139,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1132 break; 1139 break;
1133 1140
1134 default: 1141 default:
1135 bfa_assert(0); 1142 bfa_sm_fault(rport->fcs, event);
1136 } 1143 }
1137} 1144}
1138 1145
@@ -1188,7 +1195,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1188 break; 1195 break;
1189 1196
1190 default: 1197 default:
1191 bfa_assert(0); 1198 bfa_sm_fault(rport->fcs, event);
1192 } 1199 }
1193} 1200}
1194 1201
@@ -1249,7 +1256,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1249 break; 1256 break;
1250 1257
1251 default: 1258 default:
1252 bfa_assert(0); 1259 bfa_sm_fault(rport->fcs, event);
1253 } 1260 }
1254} 1261}
1255 1262
@@ -1334,7 +1341,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1334 break; 1341 break;
1335 1342
1336 default: 1343 default:
1337 bfa_assert(0); 1344 bfa_sm_fault(rport->fcs, event);
1338 } 1345 }
1339} 1346}
1340 1347
@@ -1366,7 +1373,7 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1366 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1373 len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1367 bfa_fcs_port_get_fcid(port), 0, 1374 bfa_fcs_port_get_fcid(port), 0,
1368 port->port_cfg.pwwn, port->port_cfg.nwwn, 1375 port->port_cfg.pwwn, port->port_cfg.nwwn,
1369 bfa_pport_get_maxfrsize(port->fcs->bfa)); 1376 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1370 1377
1371 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1378 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1372 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, 1379 FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1478,7 +1485,7 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1478 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, 1485 len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
1479 bfa_fcs_port_get_fcid(port), rport->reply_oxid, 1486 bfa_fcs_port_get_fcid(port), rport->reply_oxid,
1480 port->port_cfg.pwwn, port->port_cfg.nwwn, 1487 port->port_cfg.pwwn, port->port_cfg.nwwn,
1481 bfa_pport_get_maxfrsize(port->fcs->bfa)); 1488 bfa_fcport_get_maxfrsize(port->fcs->bfa));
1482 1489
1483 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, 1490 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
1484 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); 1491 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -1813,7 +1820,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1813 /* 1820 /*
1814 * get curent speed from pport attributes from BFA 1821 * get curent speed from pport attributes from BFA
1815 */ 1822 */
1816 bfa_pport_get_attr(port->fcs->bfa, &pport_attr); 1823 bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
1817 1824
1818 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); 1825 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
1819 1826
@@ -2032,13 +2039,10 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2032 2039
2033 switch (event) { 2040 switch (event) {
2034 case BFA_RPORT_AEN_ONLINE: 2041 case BFA_RPORT_AEN_ONLINE:
2035 bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr);
2036 break;
2037 case BFA_RPORT_AEN_OFFLINE: 2042 case BFA_RPORT_AEN_OFFLINE:
2038 bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr);
2039 break;
2040 case BFA_RPORT_AEN_DISCONNECT: 2043 case BFA_RPORT_AEN_DISCONNECT:
2041 bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr); 2044 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
2045 rpwwn_ptr, lpwwn_ptr);
2042 break; 2046 break;
2043 case BFA_RPORT_AEN_QOS_PRIO: 2047 case BFA_RPORT_AEN_QOS_PRIO:
2044 aen_data.rport.priv.qos = data->priv.qos; 2048 aen_data.rport.priv.qos = data->priv.qos;
@@ -2164,7 +2168,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2164 bfa_trc(port->fcs, port->fabric->bb_credit); 2168 bfa_trc(port->fcs, port->fabric->bb_credit);
2165 2169
2166 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); 2170 port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
2167 bfa_pport_set_tx_bbcredit(port->fcs->bfa, 2171 bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
2168 port->fabric->bb_credit); 2172 port->fabric->bb_credit);
2169 } 2173 }
2170 2174
@@ -2575,23 +2579,6 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
2575} 2579}
2576 2580
2577/** 2581/**
2578 * Module initialization
2579 */
2580void
2581bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs)
2582{
2583}
2584
2585/**
2586 * Module cleanup
2587 */
2588void
2589bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs)
2590{
2591 bfa_fcs_modexit_comp(fcs);
2592}
2593
2594/**
2595 * Return state of rport. 2582 * Return state of rport.
2596 */ 2583 */
2597int 2584int
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
index 3dae1774181e..a441f41d2a64 100644
--- a/drivers/scsi/bfa/rport_api.c
+++ b/drivers/scsi/bfa/rport_api.c
@@ -102,7 +102,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
102 rport_attr->qos_attr = qos_attr; 102 rport_attr->qos_attr = qos_attr;
103 103
104 rport_attr->trl_enforced = BFA_FALSE; 104 rport_attr->trl_enforced = BFA_FALSE;
105 if (bfa_pport_is_ratelim(port->fcs->bfa)) { 105 if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) || 106 if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) ||
107 (rport->rpf.rpsc_speed < 107 (rport->rpf.rpsc_speed <
108 bfa_fcs_port_get_rport_max_speed(port))) 108 bfa_fcs_port_get_rport_max_speed(port)))
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index e1932c885ac2..ae7bba67ae2a 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -91,7 +91,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
91 break; 91 break;
92 92
93 default: 93 default:
94 bfa_assert(0); 94 bfa_sm_fault(rport->fcs, event);
95 } 95 }
96} 96}
97 97
@@ -114,7 +114,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
114 break; 114 break;
115 115
116 default: 116 default:
117 bfa_assert(0); 117 bfa_sm_fault(rport->fcs, event);
118 } 118 }
119} 119}
120 120
@@ -160,7 +160,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
160 break; 160 break;
161 161
162 default: 162 default:
163 bfa_assert(0); 163 bfa_sm_fault(rport->fcs, event);
164 } 164 }
165} 165}
166 166
@@ -186,7 +186,7 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
186 break; 186 break;
187 187
188 default: 188 default:
189 bfa_assert(0); 189 bfa_sm_fault(rport->fcs, event);
190 } 190 }
191} 191}
192 192
@@ -206,7 +206,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
206 break; 206 break;
207 207
208 default: 208 default:
209 bfa_assert(0); 209 bfa_sm_fault(rport->fcs, event);
210 } 210 }
211} 211}
212 212
@@ -229,7 +229,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
229 break; 229 break;
230 230
231 default: 231 default:
232 bfa_assert(0); 232 bfa_sm_fault(rport->fcs, event);
233 } 233 }
234} 234}
235/** 235/**
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
index bd4771ff62c8..8fe09ba88a91 100644
--- a/drivers/scsi/bfa/scn.c
+++ b/drivers/scsi/bfa/scn.c
@@ -90,7 +90,7 @@ bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
90 break; 90 break;
91 91
92 default: 92 default:
93 bfa_assert(0); 93 bfa_sm_fault(scn->port->fcs, event);
94 } 94 }
95} 95}
96 96
@@ -109,7 +109,7 @@ bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
109 break; 109 break;
110 110
111 default: 111 default:
112 bfa_assert(0); 112 bfa_sm_fault(scn->port->fcs, event);
113 } 113 }
114} 114}
115 115
@@ -137,7 +137,7 @@ bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
137 break; 137 break;
138 138
139 default: 139 default:
140 bfa_assert(0); 140 bfa_sm_fault(scn->port->fcs, event);
141 } 141 }
142} 142}
143 143
@@ -157,7 +157,7 @@ bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
157 break; 157 break;
158 158
159 default: 159 default:
160 bfa_assert(0); 160 bfa_sm_fault(scn->port->fcs, event);
161 } 161 }
162} 162}
163 163
@@ -171,7 +171,7 @@ bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
171 break; 171 break;
172 172
173 default: 173 default:
174 bfa_assert(0); 174 bfa_sm_fault(scn->port->fcs, event);
175 } 175 }
176} 176}
177 177
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index e90f1e38c32d..27cd619a227a 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -122,7 +122,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
122 break; 122 break;
123 123
124 default: 124 default:
125 bfa_assert(0); 125 bfa_sm_fault(__vport_fcs(vport), event);
126 } 126 }
127} 127}
128 128
@@ -165,7 +165,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
165 break; 165 break;
166 166
167 default: 167 default:
168 bfa_assert(0); 168 bfa_sm_fault(__vport_fcs(vport), event);
169 } 169 }
170} 170}
171 171
@@ -202,7 +202,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
202 break; 202 break;
203 203
204 default: 204 default:
205 bfa_assert(0); 205 bfa_sm_fault(__vport_fcs(vport), event);
206 } 206 }
207} 207}
208 208
@@ -249,7 +249,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
249 break; 249 break;
250 250
251 default: 251 default:
252 bfa_assert(0); 252 bfa_sm_fault(__vport_fcs(vport), event);
253 } 253 }
254} 254}
255 255
@@ -283,7 +283,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
283 break; 283 break;
284 284
285 default: 285 default:
286 bfa_assert(0); 286 bfa_sm_fault(__vport_fcs(vport), event);
287 } 287 }
288} 288}
289 289
@@ -310,7 +310,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
310 break; 310 break;
311 311
312 default: 312 default:
313 bfa_assert(0); 313 bfa_sm_fault(__vport_fcs(vport), event);
314 } 314 }
315} 315}
316 316
@@ -339,7 +339,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
339 break; 339 break;
340 340
341 default: 341 default:
342 bfa_assert(0); 342 bfa_sm_fault(__vport_fcs(vport), event);
343 } 343 }
344} 344}
345 345
@@ -387,7 +387,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
387 break; 387 break;
388 388
389 default: 389 default:
390 bfa_assert(0); 390 bfa_sm_fault(__vport_fcs(vport), event);
391 } 391 }
392} 392}
393 393
@@ -419,7 +419,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
419 break; 419 break;
420 420
421 default: 421 default:
422 bfa_assert(0); 422 bfa_sm_fault(__vport_fcs(vport), event);
423 } 423 }
424} 424}
425 425
@@ -447,22 +447,8 @@ bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
447 447
448 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); 448 bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
449 449
450 switch (event) { 450 bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
451 case BFA_LPORT_AEN_NPIV_DUP_WWN: 451 role_str[role/2]);
452 bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr,
453 role_str[role / 2]);
454 break;
455 case BFA_LPORT_AEN_NPIV_FABRIC_MAX:
456 bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr,
457 role_str[role / 2]);
458 break;
459 case BFA_LPORT_AEN_NPIV_UNKNOWN:
460 bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr,
461 role_str[role / 2]);
462 break;
463 default:
464 break;
465 }
466 452
467 aen_data.lport.vf_id = port->fabric->vf_id; 453 aen_data.lport.vf_id = port->fabric->vf_id;
468 aen_data.lport.roles = role; 454 aen_data.lport.roles = role;
@@ -478,7 +464,7 @@ static void
478bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) 464bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
479{ 465{
480 bfa_lps_fdisc(vport->lps, vport, 466 bfa_lps_fdisc(vport->lps, vport,
481 bfa_pport_get_maxfrsize(__vport_bfa(vport)), 467 bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
482 __vport_pwwn(vport), __vport_nwwn(vport)); 468 __vport_pwwn(vport), __vport_nwwn(vport));
483 vport->vport_stats.fdisc_sent++; 469 vport->vport_stats.fdisc_sent++;
484} 470}
@@ -617,38 +603,6 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
617} 603}
618 604
619/** 605/**
620 * Module initialization
621 */
622void
623bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs)
624{
625}
626
627/**
628 * Module cleanup
629 */
630void
631bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs)
632{
633 bfa_fcs_modexit_comp(fcs);
634}
635
636u32
637bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
638{
639 struct bfa_ioc_attr_s ioc_attr;
640
641 bfa_get_attr(fcs->bfa, &ioc_attr);
642
643 if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
644 return BFA_FCS_MAX_VPORTS_SUPP_CT;
645 else
646 return BFA_FCS_MAX_VPORTS_SUPP_CB;
647}
648
649
650
651/**
652 * fcs_vport_api Virtual port API 606 * fcs_vport_api Virtual port API
653 */ 607 */
654 608
@@ -684,7 +638,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
684 return BFA_STATUS_VPORT_EXISTS; 638 return BFA_STATUS_VPORT_EXISTS;
685 639
686 if (bfa_fcs_fabric_vport_count(&fcs->fabric) == 640 if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
687 bfa_fcs_vport_get_max(fcs)) 641 bfa_lps_get_max_vport(fcs->bfa))
688 return BFA_STATUS_VPORT_MAX; 642 return BFA_STATUS_VPORT_MAX;
689 643
690 vport->lps = bfa_lps_alloc(fcs->bfa); 644 vport->lps = bfa_lps_alloc(fcs->bfa);
@@ -694,7 +648,8 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
694 vport->vport_drv = vport_drv; 648 vport->vport_drv = vport_drv;
695 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); 649 bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
696 650
697 bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport); 651 bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
652 bfa_fcs_lport_init(&vport->lport, vport_cfg);
698 653
699 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); 654 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
700 655
@@ -888,4 +843,15 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
888 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); 843 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
889} 844}
890 845
846/**
847 * Received clear virtual link
848 */
849void
850bfa_cb_lps_cvl_event(void *bfad, void *uarg)
851{
852 struct bfa_fcs_vport_s *vport = uarg;
891 853
854 /* Send an Offline followed by an ONLINE */
855 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
856 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
857}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1c4d1215769d..cb71dc984797 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1989,7 +1989,7 @@ static struct scsi_host_template bnx2i_host_template = {
1989 .queuecommand = iscsi_queuecommand, 1989 .queuecommand = iscsi_queuecommand,
1990 .eh_abort_handler = iscsi_eh_abort, 1990 .eh_abort_handler = iscsi_eh_abort,
1991 .eh_device_reset_handler = iscsi_eh_device_reset, 1991 .eh_device_reset_handler = iscsi_eh_device_reset,
1992 .eh_target_reset_handler = iscsi_eh_target_reset, 1992 .eh_target_reset_handler = iscsi_eh_recover_target,
1993 .change_queue_depth = iscsi_change_queue_depth, 1993 .change_queue_depth = iscsi_change_queue_depth,
1994 .can_queue = 1024, 1994 .can_queue = 1024,
1995 .max_sectors = 127, 1995 .max_sectors = 127,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 412853c65372..b7c30585dadd 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -915,7 +915,7 @@ static struct scsi_host_template cxgb3i_host_template = {
915 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 915 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
916 .eh_abort_handler = iscsi_eh_abort, 916 .eh_abort_handler = iscsi_eh_abort,
917 .eh_device_reset_handler = iscsi_eh_device_reset, 917 .eh_device_reset_handler = iscsi_eh_device_reset,
918 .eh_target_reset_handler = iscsi_eh_target_reset, 918 .eh_target_reset_handler = iscsi_eh_recover_target,
919 .target_alloc = iscsi_target_alloc, 919 .target_alloc = iscsi_target_alloc,
920 .use_clustering = DISABLE_CLUSTERING, 920 .use_clustering = DISABLE_CLUSTERING,
921 .this_id = -1, 921 .this_id = -1,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 61966750bd60..63032ec3db92 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -272,7 +272,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
272 int len = 0; 272 int len = 0;
273 273
274 rq = blk_get_request(sdev->request_queue, 274 rq = blk_get_request(sdev->request_queue,
275 (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO); 275 (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
276 if (!rq) { 276 if (!rq) {
277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
278 return NULL; 278 return NULL;
@@ -286,14 +286,17 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
286 len = sizeof(short_trespass); 286 len = sizeof(short_trespass);
287 rq->cmd_flags |= REQ_RW; 287 rq->cmd_flags |= REQ_RW;
288 rq->cmd[1] = 0x10; 288 rq->cmd[1] = 0x10;
289 rq->cmd[4] = len;
289 break; 290 break;
290 case MODE_SELECT_10: 291 case MODE_SELECT_10:
291 len = sizeof(long_trespass); 292 len = sizeof(long_trespass);
292 rq->cmd_flags |= REQ_RW; 293 rq->cmd_flags |= REQ_RW;
293 rq->cmd[1] = 0x10; 294 rq->cmd[1] = 0x10;
295 rq->cmd[8] = len;
294 break; 296 break;
295 case INQUIRY: 297 case INQUIRY:
296 len = CLARIION_BUFFER_SIZE; 298 len = CLARIION_BUFFER_SIZE;
299 rq->cmd[4] = len;
297 memset(buffer, 0, len); 300 memset(buffer, 0, len);
298 break; 301 break;
299 default: 302 default:
@@ -301,7 +304,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
301 break; 304 break;
302 } 305 }
303 306
304 rq->cmd[4] = len;
305 rq->cmd_type = REQ_TYPE_BLOCK_PC; 307 rq->cmd_type = REQ_TYPE_BLOCK_PC;
306 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 308 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
307 REQ_FAILFAST_DRIVER; 309 REQ_FAILFAST_DRIVER;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 03697ba94251..183d3a43c280 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,7 @@
43#include <scsi/scsi_cmnd.h> 43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h> 44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
46#include <linux/cciss_ioctl.h> 47#include <linux/cciss_ioctl.h>
47#include <linux/string.h> 48#include <linux/string.h>
48#include <linux/bitmap.h> 49#include <linux/bitmap.h>
@@ -52,7 +53,7 @@
52#include "hpsa.h" 53#include "hpsa.h"
53 54
54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 55/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55#define HPSA_DRIVER_VERSION "2.0.1-3" 56#define HPSA_DRIVER_VERSION "2.0.2-1"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 57#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57 58
58/* How long to wait (in milliseconds) for board to go into simple mode */ 59/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -134,6 +135,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134static void hpsa_scan_start(struct Scsi_Host *); 135static void hpsa_scan_start(struct Scsi_Host *);
135static int hpsa_scan_finished(struct Scsi_Host *sh, 136static int hpsa_scan_finished(struct Scsi_Host *sh,
136 unsigned long elapsed_time); 137 unsigned long elapsed_time);
138static int hpsa_change_queue_depth(struct scsi_device *sdev,
139 int qdepth, int reason);
137 140
138static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); 141static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
139static int hpsa_slave_alloc(struct scsi_device *sdev); 142static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -182,8 +185,8 @@ static struct scsi_host_template hpsa_driver_template = {
182 .queuecommand = hpsa_scsi_queue_command, 185 .queuecommand = hpsa_scsi_queue_command,
183 .scan_start = hpsa_scan_start, 186 .scan_start = hpsa_scan_start,
184 .scan_finished = hpsa_scan_finished, 187 .scan_finished = hpsa_scan_finished,
188 .change_queue_depth = hpsa_change_queue_depth,
185 .this_id = -1, 189 .this_id = -1,
186 .sg_tablesize = MAXSGENTRIES,
187 .use_clustering = ENABLE_CLUSTERING, 190 .use_clustering = ENABLE_CLUSTERING,
188 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 191 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
189 .ioctl = hpsa_ioctl, 192 .ioctl = hpsa_ioctl,
@@ -208,133 +211,6 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
208 return (struct ctlr_info *) *priv; 211 return (struct ctlr_info *) *priv;
209} 212}
210 213
211static struct task_struct *hpsa_scan_thread;
212static DEFINE_MUTEX(hpsa_scan_mutex);
213static LIST_HEAD(hpsa_scan_q);
214static int hpsa_scan_func(void *data);
215
216/**
217 * add_to_scan_list() - add controller to rescan queue
218 * @h: Pointer to the controller.
219 *
220 * Adds the controller to the rescan queue if not already on the queue.
221 *
222 * returns 1 if added to the queue, 0 if skipped (could be on the
223 * queue already, or the controller could be initializing or shutting
224 * down).
225 **/
226static int add_to_scan_list(struct ctlr_info *h)
227{
228 struct ctlr_info *test_h;
229 int found = 0;
230 int ret = 0;
231
232 if (h->busy_initializing)
233 return 0;
234
235 /*
236 * If we don't get the lock, it means the driver is unloading
237 * and there's no point in scheduling a new scan.
238 */
239 if (!mutex_trylock(&h->busy_shutting_down))
240 return 0;
241
242 mutex_lock(&hpsa_scan_mutex);
243 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
244 if (test_h == h) {
245 found = 1;
246 break;
247 }
248 }
249 if (!found && !h->busy_scanning) {
250 INIT_COMPLETION(h->scan_wait);
251 list_add_tail(&h->scan_list, &hpsa_scan_q);
252 ret = 1;
253 }
254 mutex_unlock(&hpsa_scan_mutex);
255 mutex_unlock(&h->busy_shutting_down);
256
257 return ret;
258}
259
260/**
261 * remove_from_scan_list() - remove controller from rescan queue
262 * @h: Pointer to the controller.
263 *
264 * Removes the controller from the rescan queue if present. Blocks if
265 * the controller is currently conducting a rescan. The controller
266 * can be in one of three states:
267 * 1. Doesn't need a scan
268 * 2. On the scan list, but not scanning yet (we remove it)
269 * 3. Busy scanning (and not on the list). In this case we want to wait for
270 * the scan to complete to make sure the scanning thread for this
271 * controller is completely idle.
272 **/
273static void remove_from_scan_list(struct ctlr_info *h)
274{
275 struct ctlr_info *test_h, *tmp_h;
276
277 mutex_lock(&hpsa_scan_mutex);
278 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
279 if (test_h == h) { /* state 2. */
280 list_del(&h->scan_list);
281 complete_all(&h->scan_wait);
282 mutex_unlock(&hpsa_scan_mutex);
283 return;
284 }
285 }
286 if (h->busy_scanning) { /* state 3. */
287 mutex_unlock(&hpsa_scan_mutex);
288 wait_for_completion(&h->scan_wait);
289 } else { /* state 1, nothing to do. */
290 mutex_unlock(&hpsa_scan_mutex);
291 }
292}
293
294/* hpsa_scan_func() - kernel thread used to rescan controllers
295 * @data: Ignored.
296 *
297 * A kernel thread used scan for drive topology changes on
298 * controllers. The thread processes only one controller at a time
299 * using a queue. Controllers are added to the queue using
300 * add_to_scan_list() and removed from the queue either after done
301 * processing or using remove_from_scan_list().
302 *
303 * returns 0.
304 **/
305static int hpsa_scan_func(__attribute__((unused)) void *data)
306{
307 struct ctlr_info *h;
308 int host_no;
309
310 while (1) {
311 set_current_state(TASK_INTERRUPTIBLE);
312 schedule();
313 if (kthread_should_stop())
314 break;
315
316 while (1) {
317 mutex_lock(&hpsa_scan_mutex);
318 if (list_empty(&hpsa_scan_q)) {
319 mutex_unlock(&hpsa_scan_mutex);
320 break;
321 }
322 h = list_entry(hpsa_scan_q.next, struct ctlr_info,
323 scan_list);
324 list_del(&h->scan_list);
325 h->busy_scanning = 1;
326 mutex_unlock(&hpsa_scan_mutex);
327 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
328 hpsa_scan_start(h->scsi_host);
329 complete_all(&h->scan_wait);
330 mutex_lock(&hpsa_scan_mutex);
331 h->busy_scanning = 0;
332 mutex_unlock(&hpsa_scan_mutex);
333 }
334 }
335 return 0;
336}
337
338static int check_for_unit_attention(struct ctlr_info *h, 214static int check_for_unit_attention(struct ctlr_info *h,
339 struct CommandList *c) 215 struct CommandList *c)
340{ 216{
@@ -352,21 +228,8 @@ static int check_for_unit_attention(struct ctlr_info *h,
352 break; 228 break;
353 case REPORT_LUNS_CHANGED: 229 case REPORT_LUNS_CHANGED:
354 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " 230 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
355 "changed\n", h->ctlr); 231 "changed, action required\n", h->ctlr);
356 /* 232 /*
357 * Here, we could call add_to_scan_list and wake up the scan thread,
358 * except that it's quite likely that we will get more than one
359 * REPORT_LUNS_CHANGED condition in quick succession, which means
360 * that those which occur after the first one will likely happen
361 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
362 * robust enough to restart in the middle, undoing what it has already
363 * done, and it's not clear that it's even possible to do this, since
364 * part of what it does is notify the SCSI mid layer, which starts
365 * doing it's own i/o to read partition tables and so on, and the
366 * driver doesn't have visibility to know what might need undoing.
367 * In any event, if possible, it is horribly complicated to get right
368 * so we just don't do it for now.
369 *
370 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 233 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
371 */ 234 */
372 break; 235 break;
@@ -393,10 +256,7 @@ static ssize_t host_store_rescan(struct device *dev,
393 struct ctlr_info *h; 256 struct ctlr_info *h;
394 struct Scsi_Host *shost = class_to_shost(dev); 257 struct Scsi_Host *shost = class_to_shost(dev);
395 h = shost_to_hba(shost); 258 h = shost_to_hba(shost);
396 if (add_to_scan_list(h)) { 259 hpsa_scan_start(h->scsi_host);
397 wake_up_process(hpsa_scan_thread);
398 wait_for_completion_interruptible(&h->scan_wait);
399 }
400 return count; 260 return count;
401} 261}
402 262
@@ -983,6 +843,76 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
983 spin_lock_init(&h->devlock); 843 spin_lock_init(&h->devlock);
984} 844}
985 845
846static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
847{
848 int i;
849
850 if (!h->cmd_sg_list)
851 return;
852 for (i = 0; i < h->nr_cmds; i++) {
853 kfree(h->cmd_sg_list[i]);
854 h->cmd_sg_list[i] = NULL;
855 }
856 kfree(h->cmd_sg_list);
857 h->cmd_sg_list = NULL;
858}
859
860static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
861{
862 int i;
863
864 if (h->chainsize <= 0)
865 return 0;
866
867 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
868 GFP_KERNEL);
869 if (!h->cmd_sg_list)
870 return -ENOMEM;
871 for (i = 0; i < h->nr_cmds; i++) {
872 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
873 h->chainsize, GFP_KERNEL);
874 if (!h->cmd_sg_list[i])
875 goto clean;
876 }
877 return 0;
878
879clean:
880 hpsa_free_sg_chain_blocks(h);
881 return -ENOMEM;
882}
883
884static void hpsa_map_sg_chain_block(struct ctlr_info *h,
885 struct CommandList *c)
886{
887 struct SGDescriptor *chain_sg, *chain_block;
888 u64 temp64;
889
890 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
891 chain_block = h->cmd_sg_list[c->cmdindex];
892 chain_sg->Ext = HPSA_SG_CHAIN;
893 chain_sg->Len = sizeof(*chain_sg) *
894 (c->Header.SGTotal - h->max_cmd_sg_entries);
895 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
896 PCI_DMA_TODEVICE);
897 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
898 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
899}
900
901static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
902 struct CommandList *c)
903{
904 struct SGDescriptor *chain_sg;
905 union u64bit temp64;
906
907 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
908 return;
909
910 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
911 temp64.val32.lower = chain_sg->Addr.lower;
912 temp64.val32.upper = chain_sg->Addr.upper;
913 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
914}
915
986static void complete_scsi_command(struct CommandList *cp, 916static void complete_scsi_command(struct CommandList *cp,
987 int timeout, u32 tag) 917 int timeout, u32 tag)
988{ 918{
@@ -999,10 +929,12 @@ static void complete_scsi_command(struct CommandList *cp,
999 h = cp->h; 929 h = cp->h;
1000 930
1001 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 931 scsi_dma_unmap(cmd); /* undo the DMA mappings */
932 if (cp->Header.SGTotal > h->max_cmd_sg_entries)
933 hpsa_unmap_sg_chain_block(h, cp);
1002 934
1003 cmd->result = (DID_OK << 16); /* host byte */ 935 cmd->result = (DID_OK << 16); /* host byte */
1004 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 936 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1005 cmd->result |= (ei->ScsiStatus << 1); 937 cmd->result |= ei->ScsiStatus;
1006 938
1007 /* copy the sense data whether we need to or not. */ 939 /* copy the sense data whether we need to or not. */
1008 memcpy(cmd->sense_buffer, ei->SenseInfo, 940 memcpy(cmd->sense_buffer, ei->SenseInfo,
@@ -1203,6 +1135,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
1203 sh->max_id = HPSA_MAX_LUN; 1135 sh->max_id = HPSA_MAX_LUN;
1204 sh->can_queue = h->nr_cmds; 1136 sh->can_queue = h->nr_cmds;
1205 sh->cmd_per_lun = h->nr_cmds; 1137 sh->cmd_per_lun = h->nr_cmds;
1138 sh->sg_tablesize = h->maxsgentries;
1206 h->scsi_host = sh; 1139 h->scsi_host = sh;
1207 sh->hostdata[0] = (unsigned long) h; 1140 sh->hostdata[0] = (unsigned long) h;
1208 sh->irq = h->intr[PERF_MODE_INT]; 1141 sh->irq = h->intr[PERF_MODE_INT];
@@ -1382,7 +1315,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1382 1315
1383 if (c == NULL) { /* trouble... */ 1316 if (c == NULL) { /* trouble... */
1384 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); 1317 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1385 return -1; 1318 return -ENOMEM;
1386 } 1319 }
1387 1320
1388 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); 1321 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
@@ -1904,16 +1837,17 @@ out:
1904 * dma mapping and fills in the scatter gather entries of the 1837 * dma mapping and fills in the scatter gather entries of the
1905 * hpsa command, cp. 1838 * hpsa command, cp.
1906 */ 1839 */
1907static int hpsa_scatter_gather(struct pci_dev *pdev, 1840static int hpsa_scatter_gather(struct ctlr_info *h,
1908 struct CommandList *cp, 1841 struct CommandList *cp,
1909 struct scsi_cmnd *cmd) 1842 struct scsi_cmnd *cmd)
1910{ 1843{
1911 unsigned int len; 1844 unsigned int len;
1912 struct scatterlist *sg; 1845 struct scatterlist *sg;
1913 u64 addr64; 1846 u64 addr64;
1914 int use_sg, i; 1847 int use_sg, i, sg_index, chained;
1848 struct SGDescriptor *curr_sg;
1915 1849
1916 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); 1850 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1917 1851
1918 use_sg = scsi_dma_map(cmd); 1852 use_sg = scsi_dma_map(cmd);
1919 if (use_sg < 0) 1853 if (use_sg < 0)
@@ -1922,15 +1856,33 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
1922 if (!use_sg) 1856 if (!use_sg)
1923 goto sglist_finished; 1857 goto sglist_finished;
1924 1858
1859 curr_sg = cp->SG;
1860 chained = 0;
1861 sg_index = 0;
1925 scsi_for_each_sg(cmd, sg, use_sg, i) { 1862 scsi_for_each_sg(cmd, sg, use_sg, i) {
1863 if (i == h->max_cmd_sg_entries - 1 &&
1864 use_sg > h->max_cmd_sg_entries) {
1865 chained = 1;
1866 curr_sg = h->cmd_sg_list[cp->cmdindex];
1867 sg_index = 0;
1868 }
1926 addr64 = (u64) sg_dma_address(sg); 1869 addr64 = (u64) sg_dma_address(sg);
1927 len = sg_dma_len(sg); 1870 len = sg_dma_len(sg);
1928 cp->SG[i].Addr.lower = 1871 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
1929 (u32) (addr64 & (u64) 0x00000000FFFFFFFF); 1872 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
1930 cp->SG[i].Addr.upper = 1873 curr_sg->Len = len;
1931 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); 1874 curr_sg->Ext = 0; /* we are not chaining */
1932 cp->SG[i].Len = len; 1875 curr_sg++;
1933 cp->SG[i].Ext = 0; /* we are not chaining */ 1876 }
1877
1878 if (use_sg + chained > h->maxSG)
1879 h->maxSG = use_sg + chained;
1880
1881 if (chained) {
1882 cp->Header.SGList = h->max_cmd_sg_entries;
1883 cp->Header.SGTotal = (u16) (use_sg + 1);
1884 hpsa_map_sg_chain_block(h, cp);
1885 return 0;
1934 } 1886 }
1935 1887
1936sglist_finished: 1888sglist_finished:
@@ -2026,7 +1978,7 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
2026 break; 1978 break;
2027 } 1979 }
2028 1980
2029 if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */ 1981 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2030 cmd_free(h, c); 1982 cmd_free(h, c);
2031 return SCSI_MLQUEUE_HOST_BUSY; 1983 return SCSI_MLQUEUE_HOST_BUSY;
2032 } 1984 }
@@ -2077,6 +2029,23 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
2077 return finished; 2029 return finished;
2078} 2030}
2079 2031
2032static int hpsa_change_queue_depth(struct scsi_device *sdev,
2033 int qdepth, int reason)
2034{
2035 struct ctlr_info *h = sdev_to_hba(sdev);
2036
2037 if (reason != SCSI_QDEPTH_DEFAULT)
2038 return -ENOTSUPP;
2039
2040 if (qdepth < 1)
2041 qdepth = 1;
2042 else
2043 if (qdepth > h->nr_cmds)
2044 qdepth = h->nr_cmds;
2045 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2046 return sdev->queue_depth;
2047}
2048
2080static void hpsa_unregister_scsi(struct ctlr_info *h) 2049static void hpsa_unregister_scsi(struct ctlr_info *h)
2081{ 2050{
2082 /* we are being forcibly unloaded, and may not refuse. */ 2051 /* we are being forcibly unloaded, and may not refuse. */
@@ -2961,7 +2930,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2961 return IRQ_HANDLED; 2930 return IRQ_HANDLED;
2962} 2931}
2963 2932
2964/* Send a message CDB to the firmwart. */ 2933/* Send a message CDB to the firmware. */
2965static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, 2934static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2966 unsigned char type) 2935 unsigned char type)
2967{ 2936{
@@ -3296,7 +3265,7 @@ default_int_mode:
3296 h->intr[PERF_MODE_INT] = pdev->irq; 3265 h->intr[PERF_MODE_INT] = pdev->irq;
3297} 3266}
3298 3267
3299static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) 3268static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3300{ 3269{
3301 ushort subsystem_vendor_id, subsystem_device_id, command; 3270 ushort subsystem_vendor_id, subsystem_device_id, command;
3302 u32 board_id, scratchpad = 0; 3271 u32 board_id, scratchpad = 0;
@@ -3405,6 +3374,23 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3405 3374
3406 h->board_id = board_id; 3375 h->board_id = board_id;
3407 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3376 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
3377 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3378
3379 /*
3380 * Limit in-command s/g elements to 32 save dma'able memory.
3381 * Howvever spec says if 0, use 31
3382 */
3383
3384 h->max_cmd_sg_entries = 31;
3385 if (h->maxsgentries > 512) {
3386 h->max_cmd_sg_entries = 32;
3387 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3388 h->maxsgentries--; /* save one for chain pointer */
3389 } else {
3390 h->maxsgentries = 31; /* default to traditional values */
3391 h->chainsize = 0;
3392 }
3393
3408 h->product_name = products[prod_index].product_name; 3394 h->product_name = products[prod_index].product_name;
3409 h->access = *(products[prod_index].access); 3395 h->access = *(products[prod_index].access);
3410 /* Allow room for some ioctls */ 3396 /* Allow room for some ioctls */
@@ -3532,8 +3518,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3532 h->busy_initializing = 1; 3518 h->busy_initializing = 1;
3533 INIT_HLIST_HEAD(&h->cmpQ); 3519 INIT_HLIST_HEAD(&h->cmpQ);
3534 INIT_HLIST_HEAD(&h->reqQ); 3520 INIT_HLIST_HEAD(&h->reqQ);
3535 mutex_init(&h->busy_shutting_down);
3536 init_completion(&h->scan_wait);
3537 rc = hpsa_pci_init(h, pdev); 3521 rc = hpsa_pci_init(h, pdev);
3538 if (rc != 0) 3522 if (rc != 0)
3539 goto clean1; 3523 goto clean1;
@@ -3587,6 +3571,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3587 rc = -ENOMEM; 3571 rc = -ENOMEM;
3588 goto clean4; 3572 goto clean4;
3589 } 3573 }
3574 if (hpsa_allocate_sg_chain_blocks(h))
3575 goto clean4;
3590 spin_lock_init(&h->lock); 3576 spin_lock_init(&h->lock);
3591 spin_lock_init(&h->scan_lock); 3577 spin_lock_init(&h->scan_lock);
3592 init_waitqueue_head(&h->scan_wait_queue); 3578 init_waitqueue_head(&h->scan_wait_queue);
@@ -3609,6 +3595,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
3609 return 1; 3595 return 1;
3610 3596
3611clean4: 3597clean4:
3598 hpsa_free_sg_chain_blocks(h);
3612 kfree(h->cmd_pool_bits); 3599 kfree(h->cmd_pool_bits);
3613 if (h->cmd_pool) 3600 if (h->cmd_pool)
3614 pci_free_consistent(h->pdev, 3601 pci_free_consistent(h->pdev,
@@ -3681,11 +3668,10 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3681 return; 3668 return;
3682 } 3669 }
3683 h = pci_get_drvdata(pdev); 3670 h = pci_get_drvdata(pdev);
3684 mutex_lock(&h->busy_shutting_down);
3685 remove_from_scan_list(h);
3686 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ 3671 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3687 hpsa_shutdown(pdev); 3672 hpsa_shutdown(pdev);
3688 iounmap(h->vaddr); 3673 iounmap(h->vaddr);
3674 hpsa_free_sg_chain_blocks(h);
3689 pci_free_consistent(h->pdev, 3675 pci_free_consistent(h->pdev,
3690 h->nr_cmds * sizeof(struct CommandList), 3676 h->nr_cmds * sizeof(struct CommandList),
3691 h->cmd_pool, h->cmd_pool_dhandle); 3677 h->cmd_pool, h->cmd_pool_dhandle);
@@ -3703,7 +3689,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3703 */ 3689 */
3704 pci_release_regions(pdev); 3690 pci_release_regions(pdev);
3705 pci_set_drvdata(pdev, NULL); 3691 pci_set_drvdata(pdev, NULL);
3706 mutex_unlock(&h->busy_shutting_down);
3707 kfree(h); 3692 kfree(h);
3708} 3693}
3709 3694
@@ -3857,23 +3842,12 @@ clean_up:
3857 */ 3842 */
3858static int __init hpsa_init(void) 3843static int __init hpsa_init(void)
3859{ 3844{
3860 int err; 3845 return pci_register_driver(&hpsa_pci_driver);
3861 /* Start the scan thread */
3862 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3863 if (IS_ERR(hpsa_scan_thread)) {
3864 err = PTR_ERR(hpsa_scan_thread);
3865 return -ENODEV;
3866 }
3867 err = pci_register_driver(&hpsa_pci_driver);
3868 if (err)
3869 kthread_stop(hpsa_scan_thread);
3870 return err;
3871} 3846}
3872 3847
3873static void __exit hpsa_cleanup(void) 3848static void __exit hpsa_cleanup(void)
3874{ 3849{
3875 pci_unregister_driver(&hpsa_pci_driver); 3850 pci_unregister_driver(&hpsa_pci_driver);
3876 kthread_stop(hpsa_scan_thread);
3877} 3851}
3878 3852
3879module_init(hpsa_init); 3853module_init(hpsa_init);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a0502b3ac17e..1bb5233b09a0 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -83,6 +83,10 @@ struct ctlr_info {
83 unsigned int maxQsinceinit; 83 unsigned int maxQsinceinit;
84 unsigned int maxSG; 84 unsigned int maxSG;
85 spinlock_t lock; 85 spinlock_t lock;
86 int maxsgentries;
87 u8 max_cmd_sg_entries;
88 int chainsize;
89 struct SGDescriptor **cmd_sg_list;
86 90
87 /* pointers to command and error info pool */ 91 /* pointers to command and error info pool */
88 struct CommandList *cmd_pool; 92 struct CommandList *cmd_pool;
@@ -97,9 +101,6 @@ struct ctlr_info {
97 int scan_finished; 101 int scan_finished;
98 spinlock_t scan_lock; 102 spinlock_t scan_lock;
99 wait_queue_head_t scan_wait_queue; 103 wait_queue_head_t scan_wait_queue;
100 struct mutex busy_shutting_down;
101 struct list_head scan_list;
102 struct completion scan_wait;
103 104
104 struct Scsi_Host *scsi_host; 105 struct Scsi_Host *scsi_host;
105 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */ 106 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3e0abdf76689..56fb9827681e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,8 @@
23 23
24/* general boundary defintions */ 24/* general boundary defintions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */ 25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 31 26#define MAXSGENTRIES 32
27#define HPSA_SG_CHAIN 0x80000000
27#define MAXREPLYQS 256 28#define MAXREPLYQS 256
28 29
29/* Command Status value */ 30/* Command Status value */
@@ -305,20 +306,23 @@ struct CommandList {
305 int cmd_type; 306 int cmd_type;
306 long cmdindex; 307 long cmdindex;
307 struct hlist_node list; 308 struct hlist_node list;
308 struct CommandList *prev;
309 struct CommandList *next;
310 struct request *rq; 309 struct request *rq;
311 struct completion *waiting; 310 struct completion *waiting;
312 int retry_count;
313 void *scsi_cmd; 311 void *scsi_cmd;
314 312
315/* on 64 bit architectures, to get this to be 32-byte-aligned 313/* on 64 bit architectures, to get this to be 32-byte-aligned
316 * it so happens we need no padding, on 32 bit systems, 314 * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
317 * we need 8 bytes of padding. This does that. 315 * we need PAD_32 bytes of padding (see below). This does that.
316 * If it happens that 64 bit and 32 bit systems need different
317 * padding, PAD_32 and PAD_64 can be set independently, and.
318 * the code below will do the right thing.
318 */ 319 */
319#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8) 320#define IS_32_BIT ((8 - sizeof(long))/4)
321#define IS_64_BIT (!IS_32_BIT)
322#define PAD_32 (4)
323#define PAD_64 (4)
324#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
320 u8 pad[COMMANDLIST_PAD]; 325 u8 pad[COMMANDLIST_PAD];
321
322}; 326};
323 327
324/* Configuration Table Structure */ 328/* Configuration Table Structure */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 732f6d35b4a8..4e577e2fee38 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -29,6 +29,7 @@
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/kthread.h> 30#include <linux/kthread.h>
31#include <linux/of.h> 31#include <linux/of.h>
32#include <linux/pm.h>
32#include <linux/stringify.h> 33#include <linux/stringify.h>
33#include <asm/firmware.h> 34#include <asm/firmware.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
@@ -4736,6 +4737,27 @@ static int ibmvfc_remove(struct vio_dev *vdev)
4736} 4737}
4737 4738
4738/** 4739/**
4740 * ibmvfc_resume - Resume from suspend
4741 * @dev: device struct
4742 *
4743 * We may have lost an interrupt across suspend/resume, so kick the
4744 * interrupt handler
4745 *
4746 */
4747static int ibmvfc_resume(struct device *dev)
4748{
4749 unsigned long flags;
4750 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
4751 struct vio_dev *vdev = to_vio_dev(dev);
4752
4753 spin_lock_irqsave(vhost->host->host_lock, flags);
4754 vio_disable_interrupts(vdev);
4755 tasklet_schedule(&vhost->tasklet);
4756 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4757 return 0;
4758}
4759
4760/**
4739 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver 4761 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
4740 * @vdev: vio device struct 4762 * @vdev: vio device struct
4741 * 4763 *
@@ -4755,6 +4777,10 @@ static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
4755}; 4777};
4756MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); 4778MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
4757 4779
4780static struct dev_pm_ops ibmvfc_pm_ops = {
4781 .resume = ibmvfc_resume
4782};
4783
4758static struct vio_driver ibmvfc_driver = { 4784static struct vio_driver ibmvfc_driver = {
4759 .id_table = ibmvfc_device_table, 4785 .id_table = ibmvfc_device_table,
4760 .probe = ibmvfc_probe, 4786 .probe = ibmvfc_probe,
@@ -4763,6 +4789,7 @@ static struct vio_driver ibmvfc_driver = {
4763 .driver = { 4789 .driver = {
4764 .name = IBMVFC_NAME, 4790 .name = IBMVFC_NAME,
4765 .owner = THIS_MODULE, 4791 .owner = THIS_MODULE,
4792 .pm = &ibmvfc_pm_ops,
4766 } 4793 }
4767}; 4794};
4768 4795
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e3a18e0ef276..dc1bcbe3b176 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -71,6 +71,7 @@
71#include <linux/dma-mapping.h> 71#include <linux/dma-mapping.h>
72#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/of.h> 73#include <linux/of.h>
74#include <linux/pm.h>
74#include <asm/firmware.h> 75#include <asm/firmware.h>
75#include <asm/vio.h> 76#include <asm/vio.h>
76#include <scsi/scsi.h> 77#include <scsi/scsi.h>
@@ -1991,6 +1992,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1991} 1992}
1992 1993
1993/** 1994/**
1995 * ibmvscsi_resume: Resume from suspend
1996 * @dev: device struct
1997 *
1998 * We may have lost an interrupt across suspend/resume, so kick the
1999 * interrupt handler
2000 */
2001static int ibmvscsi_resume(struct device *dev)
2002{
2003 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
2004 return ibmvscsi_ops->resume(hostdata);
2005}
2006
2007/**
1994 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 2008 * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
1995 * support. 2009 * support.
1996 */ 2010 */
@@ -2000,6 +2014,10 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
2000}; 2014};
2001MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); 2015MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
2002 2016
2017static struct dev_pm_ops ibmvscsi_pm_ops = {
2018 .resume = ibmvscsi_resume
2019};
2020
2003static struct vio_driver ibmvscsi_driver = { 2021static struct vio_driver ibmvscsi_driver = {
2004 .id_table = ibmvscsi_device_table, 2022 .id_table = ibmvscsi_device_table,
2005 .probe = ibmvscsi_probe, 2023 .probe = ibmvscsi_probe,
@@ -2008,6 +2026,7 @@ static struct vio_driver ibmvscsi_driver = {
2008 .driver = { 2026 .driver = {
2009 .name = "ibmvscsi", 2027 .name = "ibmvscsi",
2010 .owner = THIS_MODULE, 2028 .owner = THIS_MODULE,
2029 .pm = &ibmvscsi_pm_ops,
2011 } 2030 }
2012}; 2031};
2013 2032
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 76425303def0..9cb7c6a773e1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -120,6 +120,7 @@ struct ibmvscsi_ops {
120 struct ibmvscsi_host_data *hostdata); 120 struct ibmvscsi_host_data *hostdata);
121 int (*send_crq)(struct ibmvscsi_host_data *hostdata, 121 int (*send_crq)(struct ibmvscsi_host_data *hostdata,
122 u64 word1, u64 word2); 122 u64 word1, u64 word2);
123 int (*resume) (struct ibmvscsi_host_data *hostdata);
123}; 124};
124 125
125extern struct ibmvscsi_ops iseriesvscsi_ops; 126extern struct ibmvscsi_ops iseriesvscsi_ops;
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 0775fdee5fa8..f4776451a754 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -158,10 +158,16 @@ static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
158 0); 158 0);
159} 159}
160 160
161static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
162{
163 return 0;
164}
165
161struct ibmvscsi_ops iseriesvscsi_ops = { 166struct ibmvscsi_ops iseriesvscsi_ops = {
162 .init_crq_queue = iseriesvscsi_init_crq_queue, 167 .init_crq_queue = iseriesvscsi_init_crq_queue,
163 .release_crq_queue = iseriesvscsi_release_crq_queue, 168 .release_crq_queue = iseriesvscsi_release_crq_queue,
164 .reset_crq_queue = iseriesvscsi_reset_crq_queue, 169 .reset_crq_queue = iseriesvscsi_reset_crq_queue,
165 .reenable_crq_queue = iseriesvscsi_reenable_crq_queue, 170 .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
166 .send_crq = iseriesvscsi_send_crq, 171 .send_crq = iseriesvscsi_send_crq,
172 .resume = iseriesvscsi_resume,
167}; 173};
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 462a8574dad9..63a30cbbf9de 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -334,10 +334,23 @@ static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
334 return rc; 334 return rc;
335} 335}
336 336
337/**
338 * rpavscsi_resume: - resume after suspend
339 * @hostdata: ibmvscsi_host_data of host
340 *
341 */
342static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
343{
344 vio_disable_interrupts(to_vio_dev(hostdata->dev));
345 tasklet_schedule(&hostdata->srp_task);
346 return 0;
347}
348
337struct ibmvscsi_ops rpavscsi_ops = { 349struct ibmvscsi_ops rpavscsi_ops = {
338 .init_crq_queue = rpavscsi_init_crq_queue, 350 .init_crq_queue = rpavscsi_init_crq_queue,
339 .release_crq_queue = rpavscsi_release_crq_queue, 351 .release_crq_queue = rpavscsi_release_crq_queue,
340 .reset_crq_queue = rpavscsi_reset_crq_queue, 352 .reset_crq_queue = rpavscsi_reset_crq_queue,
341 .reenable_crq_queue = rpavscsi_reenable_crq_queue, 353 .reenable_crq_queue = rpavscsi_reenable_crq_queue,
342 .send_crq = rpavscsi_send_crq, 354 .send_crq = rpavscsi_send_crq,
355 .resume = rpavscsi_resume,
343}; 356};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 032f0d0e6cb4..c79cd98eb6bf 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -72,6 +72,8 @@
72#include <linux/moduleparam.h> 72#include <linux/moduleparam.h>
73#include <linux/libata.h> 73#include <linux/libata.h>
74#include <linux/hdreg.h> 74#include <linux/hdreg.h>
75#include <linux/reboot.h>
76#include <linux/stringify.h>
75#include <asm/io.h> 77#include <asm/io.h>
76#include <asm/irq.h> 78#include <asm/irq.h>
77#include <asm/processor.h> 79#include <asm/processor.h>
@@ -91,8 +93,8 @@ static unsigned int ipr_max_speed = 1;
91static int ipr_testmode = 0; 93static int ipr_testmode = 0;
92static unsigned int ipr_fastfail = 0; 94static unsigned int ipr_fastfail = 0;
93static unsigned int ipr_transop_timeout = 0; 95static unsigned int ipr_transop_timeout = 0;
94static unsigned int ipr_enable_cache = 1;
95static unsigned int ipr_debug = 0; 96static unsigned int ipr_debug = 0;
97static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
96static unsigned int ipr_dual_ioa_raid = 1; 98static unsigned int ipr_dual_ioa_raid = 1;
97static DEFINE_SPINLOCK(ipr_driver_lock); 99static DEFINE_SPINLOCK(ipr_driver_lock);
98 100
@@ -104,13 +106,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
104 { 106 {
105 .set_interrupt_mask_reg = 0x0022C, 107 .set_interrupt_mask_reg = 0x0022C,
106 .clr_interrupt_mask_reg = 0x00230, 108 .clr_interrupt_mask_reg = 0x00230,
109 .clr_interrupt_mask_reg32 = 0x00230,
107 .sense_interrupt_mask_reg = 0x0022C, 110 .sense_interrupt_mask_reg = 0x0022C,
111 .sense_interrupt_mask_reg32 = 0x0022C,
108 .clr_interrupt_reg = 0x00228, 112 .clr_interrupt_reg = 0x00228,
113 .clr_interrupt_reg32 = 0x00228,
109 .sense_interrupt_reg = 0x00224, 114 .sense_interrupt_reg = 0x00224,
115 .sense_interrupt_reg32 = 0x00224,
110 .ioarrin_reg = 0x00404, 116 .ioarrin_reg = 0x00404,
111 .sense_uproc_interrupt_reg = 0x00214, 117 .sense_uproc_interrupt_reg = 0x00214,
118 .sense_uproc_interrupt_reg32 = 0x00214,
112 .set_uproc_interrupt_reg = 0x00214, 119 .set_uproc_interrupt_reg = 0x00214,
113 .clr_uproc_interrupt_reg = 0x00218 120 .set_uproc_interrupt_reg32 = 0x00214,
121 .clr_uproc_interrupt_reg = 0x00218,
122 .clr_uproc_interrupt_reg32 = 0x00218
114 } 123 }
115 }, 124 },
116 { /* Snipe and Scamp */ 125 { /* Snipe and Scamp */
@@ -119,25 +128,59 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
119 { 128 {
120 .set_interrupt_mask_reg = 0x00288, 129 .set_interrupt_mask_reg = 0x00288,
121 .clr_interrupt_mask_reg = 0x0028C, 130 .clr_interrupt_mask_reg = 0x0028C,
131 .clr_interrupt_mask_reg32 = 0x0028C,
122 .sense_interrupt_mask_reg = 0x00288, 132 .sense_interrupt_mask_reg = 0x00288,
133 .sense_interrupt_mask_reg32 = 0x00288,
123 .clr_interrupt_reg = 0x00284, 134 .clr_interrupt_reg = 0x00284,
135 .clr_interrupt_reg32 = 0x00284,
124 .sense_interrupt_reg = 0x00280, 136 .sense_interrupt_reg = 0x00280,
137 .sense_interrupt_reg32 = 0x00280,
125 .ioarrin_reg = 0x00504, 138 .ioarrin_reg = 0x00504,
126 .sense_uproc_interrupt_reg = 0x00290, 139 .sense_uproc_interrupt_reg = 0x00290,
140 .sense_uproc_interrupt_reg32 = 0x00290,
127 .set_uproc_interrupt_reg = 0x00290, 141 .set_uproc_interrupt_reg = 0x00290,
128 .clr_uproc_interrupt_reg = 0x00294 142 .set_uproc_interrupt_reg32 = 0x00290,
143 .clr_uproc_interrupt_reg = 0x00294,
144 .clr_uproc_interrupt_reg32 = 0x00294
145 }
146 },
147 { /* CRoC */
148 .mailbox = 0x00040,
149 .cache_line_size = 0x20,
150 {
151 .set_interrupt_mask_reg = 0x00010,
152 .clr_interrupt_mask_reg = 0x00018,
153 .clr_interrupt_mask_reg32 = 0x0001C,
154 .sense_interrupt_mask_reg = 0x00010,
155 .sense_interrupt_mask_reg32 = 0x00014,
156 .clr_interrupt_reg = 0x00008,
157 .clr_interrupt_reg32 = 0x0000C,
158 .sense_interrupt_reg = 0x00000,
159 .sense_interrupt_reg32 = 0x00004,
160 .ioarrin_reg = 0x00070,
161 .sense_uproc_interrupt_reg = 0x00020,
162 .sense_uproc_interrupt_reg32 = 0x00024,
163 .set_uproc_interrupt_reg = 0x00020,
164 .set_uproc_interrupt_reg32 = 0x00024,
165 .clr_uproc_interrupt_reg = 0x00028,
166 .clr_uproc_interrupt_reg32 = 0x0002C,
167 .init_feedback_reg = 0x0005C,
168 .dump_addr_reg = 0x00064,
169 .dump_data_reg = 0x00068
129 } 170 }
130 }, 171 },
131}; 172};
132 173
133static const struct ipr_chip_t ipr_chip[] = { 174static const struct ipr_chip_t ipr_chip[] = {
134 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 175 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
135 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, 176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
136 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 177 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
137 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, 178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
138 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, 179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
139 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, 180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
140 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } 181 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
141}; 184};
142 185
143static int ipr_max_bus_speeds [] = { 186static int ipr_max_bus_speeds [] = {
@@ -156,12 +199,13 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
156MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); 199MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
157module_param_named(transop_timeout, ipr_transop_timeout, int, 0); 200module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
158MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); 201MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
159module_param_named(enable_cache, ipr_enable_cache, int, 0);
160MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
161module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); 202module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
162MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); 203MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
163module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); 204module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
164MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); 205MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
206module_param_named(max_devs, ipr_max_devs, int, 0);
207MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
208 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
165MODULE_LICENSE("GPL"); 209MODULE_LICENSE("GPL");
166MODULE_VERSION(IPR_DRIVER_VERSION); 210MODULE_VERSION(IPR_DRIVER_VERSION);
167 211
@@ -180,6 +224,20 @@ struct ipr_error_table_t ipr_error_table[] = {
180 "FFFE: Soft device bus error recovered by the IOA"}, 224 "FFFE: Soft device bus error recovered by the IOA"},
181 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, 225 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
182 "4101: Soft device bus fabric error"}, 226 "4101: Soft device bus fabric error"},
227 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
228 "FFFC: Logical block guard error recovered by the device"},
229 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
230 "FFFC: Logical block reference tag error recovered by the device"},
231 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
232 "4171: Recovered scatter list tag / sequence number error"},
233 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
234 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
235 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
236 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
237 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
238 "FFFD: Recovered logical block reference tag error detected by the IOA"},
239 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
240 "FFFD: Logical block guard error recovered by the IOA"},
183 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, 241 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
184 "FFF9: Device sector reassign successful"}, 242 "FFF9: Device sector reassign successful"},
185 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, 243 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -236,12 +294,28 @@ struct ipr_error_table_t ipr_error_table[] = {
236 "3120: SCSI bus is not operational"}, 294 "3120: SCSI bus is not operational"},
237 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, 295 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
238 "4100: Hard device bus fabric error"}, 296 "4100: Hard device bus fabric error"},
297 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
298 "310C: Logical block guard error detected by the device"},
299 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
300 "310C: Logical block reference tag error detected by the device"},
301 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
302 "4170: Scatter list tag / sequence number error"},
303 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
304 "8150: Logical block CRC error on IOA to Host transfer"},
305 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
306 "4170: Logical block sequence number error on IOA to Host transfer"},
307 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
308 "310D: Logical block reference tag error detected by the IOA"},
309 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
310 "310D: Logical block guard error detected by the IOA"},
239 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, 311 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
240 "9000: IOA reserved area data check"}, 312 "9000: IOA reserved area data check"},
241 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, 313 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
242 "9001: IOA reserved area invalid data pattern"}, 314 "9001: IOA reserved area invalid data pattern"},
243 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, 315 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
244 "9002: IOA reserved area LRC error"}, 316 "9002: IOA reserved area LRC error"},
317 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
318 "Hardware Error, IOA metadata access error"},
245 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, 319 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
246 "102E: Out of alternate sectors for disk storage"}, 320 "102E: Out of alternate sectors for disk storage"},
247 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, 321 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
@@ -306,6 +380,8 @@ struct ipr_error_table_t ipr_error_table[] = {
306 "Illegal request, commands not allowed to this device"}, 380 "Illegal request, commands not allowed to this device"},
307 {0x05258100, 0, 0, 381 {0x05258100, 0, 0,
308 "Illegal request, command not allowed to a secondary adapter"}, 382 "Illegal request, command not allowed to a secondary adapter"},
383 {0x05258200, 0, 0,
384 "Illegal request, command not allowed to a non-optimized resource"},
309 {0x05260000, 0, 0, 385 {0x05260000, 0, 0,
310 "Illegal request, invalid field in parameter list"}, 386 "Illegal request, invalid field in parameter list"},
311 {0x05260100, 0, 0, 387 {0x05260100, 0, 0,
@@ -468,7 +544,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
468 trace_entry->time = jiffies; 544 trace_entry->time = jiffies;
469 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 545 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
470 trace_entry->type = type; 546 trace_entry->type = type;
471 trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command; 547 if (ipr_cmd->ioa_cfg->sis64)
548 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
549 else
550 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
472 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; 551 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
473 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; 552 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
474 trace_entry->u.add_data = add_data; 553 trace_entry->u.add_data = add_data;
@@ -488,16 +567,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
488{ 567{
489 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
490 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 569 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
491 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 570 dma_addr_t dma_addr = ipr_cmd->dma_addr;
492 571
493 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 572 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
494 ioarcb->write_data_transfer_length = 0; 573 ioarcb->data_transfer_length = 0;
495 ioarcb->read_data_transfer_length = 0; 574 ioarcb->read_data_transfer_length = 0;
496 ioarcb->write_ioadl_len = 0; 575 ioarcb->ioadl_len = 0;
497 ioarcb->read_ioadl_len = 0; 576 ioarcb->read_ioadl_len = 0;
498 ioarcb->write_ioadl_addr = 577
499 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 578 if (ipr_cmd->ioa_cfg->sis64)
500 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
581 else {
582 ioarcb->write_ioadl_addr =
583 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
584 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
585 }
586
501 ioasa->ioasc = 0; 587 ioasa->ioasc = 0;
502 ioasa->residual_data_len = 0; 588 ioasa->residual_data_len = 0;
503 ioasa->u.gata.status = 0; 589 ioasa->u.gata.status = 0;
@@ -562,10 +648,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
562 ioa_cfg->allow_interrupts = 0; 648 ioa_cfg->allow_interrupts = 0;
563 649
564 /* Set interrupt mask to stop all new interrupts */ 650 /* Set interrupt mask to stop all new interrupts */
565 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); 651 if (ioa_cfg->sis64)
652 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
653 else
654 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
566 655
567 /* Clear any pending interrupts */ 656 /* Clear any pending interrupts */
568 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg); 657 if (ioa_cfg->sis64)
658 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
659 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
569 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 660 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
570} 661}
571 662
@@ -693,6 +784,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
693} 784}
694 785
695/** 786/**
787 * ipr_send_command - Send driver initiated requests.
788 * @ipr_cmd: ipr command struct
789 *
790 * This function sends a command to the adapter using the correct write call.
791 * In the case of sis64, calculate the ioarcb size required. Then or in the
792 * appropriate bits.
793 *
794 * Return value:
795 * none
796 **/
797static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
798{
799 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
800 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
801
802 if (ioa_cfg->sis64) {
803 /* The default size is 256 bytes */
804 send_dma_addr |= 0x1;
805
806 /* If the number of ioadls * size of ioadl > 128 bytes,
807 then use a 512 byte ioarcb */
808 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
809 send_dma_addr |= 0x4;
810 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
811 } else
812 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
813}
814
815/**
696 * ipr_do_req - Send driver initiated requests. 816 * ipr_do_req - Send driver initiated requests.
697 * @ipr_cmd: ipr command struct 817 * @ipr_cmd: ipr command struct
698 * @done: done function 818 * @done: done function
@@ -724,8 +844,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
724 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); 844 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
725 845
726 mb(); 846 mb();
727 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 847
728 ioa_cfg->regs.ioarrin_reg); 848 ipr_send_command(ipr_cmd);
729} 849}
730 850
731/** 851/**
@@ -747,6 +867,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
747} 867}
748 868
749/** 869/**
870 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
871 * @ipr_cmd: ipr command struct
872 * @dma_addr: dma address
873 * @len: transfer length
874 * @flags: ioadl flag value
875 *
876 * This function initializes an ioadl in the case where there is only a single
877 * descriptor.
878 *
879 * Return value:
880 * nothing
881 **/
882static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
883 u32 len, int flags)
884{
885 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
886 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
887
888 ipr_cmd->dma_use_sg = 1;
889
890 if (ipr_cmd->ioa_cfg->sis64) {
891 ioadl64->flags = cpu_to_be32(flags);
892 ioadl64->data_len = cpu_to_be32(len);
893 ioadl64->address = cpu_to_be64(dma_addr);
894
895 ipr_cmd->ioarcb.ioadl_len =
896 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
897 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
898 } else {
899 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
900 ioadl->address = cpu_to_be32(dma_addr);
901
902 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
903 ipr_cmd->ioarcb.read_ioadl_len =
904 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
905 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
906 } else {
907 ipr_cmd->ioarcb.ioadl_len =
908 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
909 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
910 }
911 }
912}
913
914/**
750 * ipr_send_blocking_cmd - Send command and sleep on its completion. 915 * ipr_send_blocking_cmd - Send command and sleep on its completion.
751 * @ipr_cmd: ipr command struct 916 * @ipr_cmd: ipr command struct
752 * @timeout_func: function to invoke if command times out 917 * @timeout_func: function to invoke if command times out
@@ -803,11 +968,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
803 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; 968 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
804 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; 969 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
805 970
806 ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); 971 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
807 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 972 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
808 ipr_cmd->ioadl[0].flags_and_data_len =
809 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
810 ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
811 973
812 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) 974 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
813 ipr_cmd->done = ipr_process_ccn; 975 ipr_cmd->done = ipr_process_ccn;
@@ -817,22 +979,54 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
817 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); 979 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
818 980
819 mb(); 981 mb();
820 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 982
821 ioa_cfg->regs.ioarrin_reg); 983 ipr_send_command(ipr_cmd);
822 } else { 984 } else {
823 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 985 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
824 } 986 }
825} 987}
826 988
827/** 989/**
990 * ipr_update_ata_class - Update the ata class in the resource entry
991 * @res: resource entry struct
992 * @proto: cfgte device bus protocol value
993 *
994 * Return value:
995 * none
996 **/
997static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
998{
999 switch(proto) {
1000 case IPR_PROTO_SATA:
1001 case IPR_PROTO_SAS_STP:
1002 res->ata_class = ATA_DEV_ATA;
1003 break;
1004 case IPR_PROTO_SATA_ATAPI:
1005 case IPR_PROTO_SAS_STP_ATAPI:
1006 res->ata_class = ATA_DEV_ATAPI;
1007 break;
1008 default:
1009 res->ata_class = ATA_DEV_UNKNOWN;
1010 break;
1011 };
1012}
1013
1014/**
828 * ipr_init_res_entry - Initialize a resource entry struct. 1015 * ipr_init_res_entry - Initialize a resource entry struct.
829 * @res: resource entry struct 1016 * @res: resource entry struct
1017 * @cfgtew: config table entry wrapper struct
830 * 1018 *
831 * Return value: 1019 * Return value:
832 * none 1020 * none
833 **/ 1021 **/
834static void ipr_init_res_entry(struct ipr_resource_entry *res) 1022static void ipr_init_res_entry(struct ipr_resource_entry *res,
1023 struct ipr_config_table_entry_wrapper *cfgtew)
835{ 1024{
1025 int found = 0;
1026 unsigned int proto;
1027 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1028 struct ipr_resource_entry *gscsi_res = NULL;
1029
836 res->needs_sync_complete = 0; 1030 res->needs_sync_complete = 0;
837 res->in_erp = 0; 1031 res->in_erp = 0;
838 res->add_to_ml = 0; 1032 res->add_to_ml = 0;
@@ -840,6 +1034,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
840 res->resetting_device = 0; 1034 res->resetting_device = 0;
841 res->sdev = NULL; 1035 res->sdev = NULL;
842 res->sata_port = NULL; 1036 res->sata_port = NULL;
1037
1038 if (ioa_cfg->sis64) {
1039 proto = cfgtew->u.cfgte64->proto;
1040 res->res_flags = cfgtew->u.cfgte64->res_flags;
1041 res->qmodel = IPR_QUEUEING_MODEL64(res);
1042 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1043
1044 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1045 sizeof(res->res_path));
1046
1047 res->bus = 0;
1048 res->lun = scsilun_to_int(&res->dev_lun);
1049
1050 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1051 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1052 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1053 found = 1;
1054 res->target = gscsi_res->target;
1055 break;
1056 }
1057 }
1058 if (!found) {
1059 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1060 ioa_cfg->max_devs_supported);
1061 set_bit(res->target, ioa_cfg->target_ids);
1062 }
1063
1064 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1065 sizeof(res->dev_lun.scsi_lun));
1066 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1067 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1068 res->target = 0;
1069 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1070 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1071 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1072 ioa_cfg->max_devs_supported);
1073 set_bit(res->target, ioa_cfg->array_ids);
1074 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1075 res->bus = IPR_VSET_VIRTUAL_BUS;
1076 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1077 ioa_cfg->max_devs_supported);
1078 set_bit(res->target, ioa_cfg->vset_ids);
1079 } else {
1080 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1081 ioa_cfg->max_devs_supported);
1082 set_bit(res->target, ioa_cfg->target_ids);
1083 }
1084 } else {
1085 proto = cfgtew->u.cfgte->proto;
1086 res->qmodel = IPR_QUEUEING_MODEL(res);
1087 res->flags = cfgtew->u.cfgte->flags;
1088 if (res->flags & IPR_IS_IOA_RESOURCE)
1089 res->type = IPR_RES_TYPE_IOAFP;
1090 else
1091 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1092
1093 res->bus = cfgtew->u.cfgte->res_addr.bus;
1094 res->target = cfgtew->u.cfgte->res_addr.target;
1095 res->lun = cfgtew->u.cfgte->res_addr.lun;
1096 }
1097
1098 ipr_update_ata_class(res, proto);
1099}
1100
1101/**
1102 * ipr_is_same_device - Determine if two devices are the same.
1103 * @res: resource entry struct
1104 * @cfgtew: config table entry wrapper struct
1105 *
1106 * Return value:
1107 * 1 if the devices are the same / 0 otherwise
1108 **/
1109static int ipr_is_same_device(struct ipr_resource_entry *res,
1110 struct ipr_config_table_entry_wrapper *cfgtew)
1111{
1112 if (res->ioa_cfg->sis64) {
1113 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1114 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1115 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1116 sizeof(cfgtew->u.cfgte64->lun))) {
1117 return 1;
1118 }
1119 } else {
1120 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1121 res->target == cfgtew->u.cfgte->res_addr.target &&
1122 res->lun == cfgtew->u.cfgte->res_addr.lun)
1123 return 1;
1124 }
1125
1126 return 0;
1127}
1128
1129/**
1130 * ipr_format_resource_path - Format the resource path for printing.
1131 * @res_path: resource path
1132 * @buf: buffer
1133 *
1134 * Return value:
1135 * pointer to buffer
1136 **/
1137static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1138{
1139 int i;
1140
1141 sprintf(buffer, "%02X", res_path[0]);
1142 for (i=1; res_path[i] != 0xff; i++)
1143 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
1144
1145 return buffer;
1146}
1147
1148/**
1149 * ipr_update_res_entry - Update the resource entry.
1150 * @res: resource entry struct
1151 * @cfgtew: config table entry wrapper struct
1152 *
1153 * Return value:
1154 * none
1155 **/
1156static void ipr_update_res_entry(struct ipr_resource_entry *res,
1157 struct ipr_config_table_entry_wrapper *cfgtew)
1158{
1159 char buffer[IPR_MAX_RES_PATH_LENGTH];
1160 unsigned int proto;
1161 int new_path = 0;
1162
1163 if (res->ioa_cfg->sis64) {
1164 res->flags = cfgtew->u.cfgte64->flags;
1165 res->res_flags = cfgtew->u.cfgte64->res_flags;
1166 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1167
1168 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1169 sizeof(struct ipr_std_inq_data));
1170
1171 res->qmodel = IPR_QUEUEING_MODEL64(res);
1172 proto = cfgtew->u.cfgte64->proto;
1173 res->res_handle = cfgtew->u.cfgte64->res_handle;
1174 res->dev_id = cfgtew->u.cfgte64->dev_id;
1175
1176 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1177 sizeof(res->dev_lun.scsi_lun));
1178
1179 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1180 sizeof(res->res_path))) {
1181 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1182 sizeof(res->res_path));
1183 new_path = 1;
1184 }
1185
1186 if (res->sdev && new_path)
1187 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1188 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1189 } else {
1190 res->flags = cfgtew->u.cfgte->flags;
1191 if (res->flags & IPR_IS_IOA_RESOURCE)
1192 res->type = IPR_RES_TYPE_IOAFP;
1193 else
1194 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1195
1196 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1197 sizeof(struct ipr_std_inq_data));
1198
1199 res->qmodel = IPR_QUEUEING_MODEL(res);
1200 proto = cfgtew->u.cfgte->proto;
1201 res->res_handle = cfgtew->u.cfgte->res_handle;
1202 }
1203
1204 ipr_update_ata_class(res, proto);
1205}
1206
1207/**
1208 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1209 * for the resource.
1210 * @res: resource entry struct
1211 * @cfgtew: config table entry wrapper struct
1212 *
1213 * Return value:
1214 * none
1215 **/
1216static void ipr_clear_res_target(struct ipr_resource_entry *res)
1217{
1218 struct ipr_resource_entry *gscsi_res = NULL;
1219 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1220
1221 if (!ioa_cfg->sis64)
1222 return;
1223
1224 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1225 clear_bit(res->target, ioa_cfg->array_ids);
1226 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1227 clear_bit(res->target, ioa_cfg->vset_ids);
1228 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1229 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1230 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1231 return;
1232 clear_bit(res->target, ioa_cfg->target_ids);
1233
1234 } else if (res->bus == 0)
1235 clear_bit(res->target, ioa_cfg->target_ids);
843} 1236}
844 1237
845/** 1238/**
@@ -851,17 +1244,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
851 * none 1244 * none
852 **/ 1245 **/
853static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, 1246static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
854 struct ipr_hostrcb *hostrcb) 1247 struct ipr_hostrcb *hostrcb)
855{ 1248{
856 struct ipr_resource_entry *res = NULL; 1249 struct ipr_resource_entry *res = NULL;
857 struct ipr_config_table_entry *cfgte; 1250 struct ipr_config_table_entry_wrapper cfgtew;
1251 __be32 cc_res_handle;
1252
858 u32 is_ndn = 1; 1253 u32 is_ndn = 1;
859 1254
860 cfgte = &hostrcb->hcam.u.ccn.cfgte; 1255 if (ioa_cfg->sis64) {
1256 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1257 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1258 } else {
1259 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1260 cc_res_handle = cfgtew.u.cfgte->res_handle;
1261 }
861 1262
862 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 1263 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
863 if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, 1264 if (res->res_handle == cc_res_handle) {
864 sizeof(cfgte->res_addr))) {
865 is_ndn = 0; 1265 is_ndn = 0;
866 break; 1266 break;
867 } 1267 }
@@ -879,20 +1279,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
879 struct ipr_resource_entry, queue); 1279 struct ipr_resource_entry, queue);
880 1280
881 list_del(&res->queue); 1281 list_del(&res->queue);
882 ipr_init_res_entry(res); 1282 ipr_init_res_entry(res, &cfgtew);
883 list_add_tail(&res->queue, &ioa_cfg->used_res_q); 1283 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
884 } 1284 }
885 1285
886 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 1286 ipr_update_res_entry(res, &cfgtew);
887 1287
888 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { 1288 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
889 if (res->sdev) { 1289 if (res->sdev) {
890 res->del_from_ml = 1; 1290 res->del_from_ml = 1;
891 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 1291 res->res_handle = IPR_INVALID_RES_HANDLE;
892 if (ioa_cfg->allow_ml_add_del) 1292 if (ioa_cfg->allow_ml_add_del)
893 schedule_work(&ioa_cfg->work_q); 1293 schedule_work(&ioa_cfg->work_q);
894 } else 1294 } else {
1295 ipr_clear_res_target(res);
895 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1296 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1297 }
896 } else if (!res->sdev) { 1298 } else if (!res->sdev) {
897 res->add_to_ml = 1; 1299 res->add_to_ml = 1;
898 if (ioa_cfg->allow_ml_add_del) 1300 if (ioa_cfg->allow_ml_add_del)
@@ -1044,8 +1446,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1044static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, 1446static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1045 struct ipr_hostrcb *hostrcb) 1447 struct ipr_hostrcb *hostrcb)
1046{ 1448{
1047 struct ipr_hostrcb_type_12_error *error = 1449 struct ipr_hostrcb_type_12_error *error;
1048 &hostrcb->hcam.u.error.u.type_12_error; 1450
1451 if (ioa_cfg->sis64)
1452 error = &hostrcb->hcam.u.error64.u.type_12_error;
1453 else
1454 error = &hostrcb->hcam.u.error.u.type_12_error;
1049 1455
1050 ipr_err("-----Current Configuration-----\n"); 1456 ipr_err("-----Current Configuration-----\n");
1051 ipr_err("Cache Directory Card Information:\n"); 1457 ipr_err("Cache Directory Card Information:\n");
@@ -1138,6 +1544,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1138} 1544}
1139 1545
1140/** 1546/**
1547 * ipr_log_sis64_config_error - Log a device error.
1548 * @ioa_cfg: ioa config struct
1549 * @hostrcb: hostrcb struct
1550 *
1551 * Return value:
1552 * none
1553 **/
1554static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1555 struct ipr_hostrcb *hostrcb)
1556{
1557 int errors_logged, i;
1558 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1559 struct ipr_hostrcb_type_23_error *error;
1560 char buffer[IPR_MAX_RES_PATH_LENGTH];
1561
1562 error = &hostrcb->hcam.u.error64.u.type_23_error;
1563 errors_logged = be32_to_cpu(error->errors_logged);
1564
1565 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1566 be32_to_cpu(error->errors_detected), errors_logged);
1567
1568 dev_entry = error->dev;
1569
1570 for (i = 0; i < errors_logged; i++, dev_entry++) {
1571 ipr_err_separator;
1572
1573 ipr_err("Device %d : %s", i + 1,
1574 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1575 ipr_log_ext_vpd(&dev_entry->vpd);
1576
1577 ipr_err("-----New Device Information-----\n");
1578 ipr_log_ext_vpd(&dev_entry->new_vpd);
1579
1580 ipr_err("Cache Directory Card Information:\n");
1581 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1582
1583 ipr_err("Adapter Card Information:\n");
1584 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1585 }
1586}
1587
1588/**
1141 * ipr_log_config_error - Log a configuration error. 1589 * ipr_log_config_error - Log a configuration error.
1142 * @ioa_cfg: ioa config struct 1590 * @ioa_cfg: ioa config struct
1143 * @hostrcb: hostrcb struct 1591 * @hostrcb: hostrcb struct
@@ -1331,7 +1779,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1331{ 1779{
1332 struct ipr_hostrcb_type_17_error *error; 1780 struct ipr_hostrcb_type_17_error *error;
1333 1781
1334 error = &hostrcb->hcam.u.error.u.type_17_error; 1782 if (ioa_cfg->sis64)
1783 error = &hostrcb->hcam.u.error64.u.type_17_error;
1784 else
1785 error = &hostrcb->hcam.u.error.u.type_17_error;
1786
1335 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; 1787 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1336 strim(error->failure_reason); 1788 strim(error->failure_reason);
1337 1789
@@ -1438,6 +1890,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1438 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); 1890 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1439} 1891}
1440 1892
1893/**
1894 * ipr_log64_fabric_path - Log a fabric path error
1895 * @hostrcb: hostrcb struct
1896 * @fabric: fabric descriptor
1897 *
1898 * Return value:
1899 * none
1900 **/
1901static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1902 struct ipr_hostrcb64_fabric_desc *fabric)
1903{
1904 int i, j;
1905 u8 path_state = fabric->path_state;
1906 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1907 u8 state = path_state & IPR_PATH_STATE_MASK;
1908 char buffer[IPR_MAX_RES_PATH_LENGTH];
1909
1910 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1911 if (path_active_desc[i].active != active)
1912 continue;
1913
1914 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1915 if (path_state_desc[j].state != state)
1916 continue;
1917
1918 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1919 path_active_desc[i].desc, path_state_desc[j].desc,
1920 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1921 return;
1922 }
1923 }
1924
1925 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1926 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1927}
1928
1441static const struct { 1929static const struct {
1442 u8 type; 1930 u8 type;
1443 char *desc; 1931 char *desc;
@@ -1547,6 +2035,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1547} 2035}
1548 2036
1549/** 2037/**
2038 * ipr_log64_path_elem - Log a fabric path element.
2039 * @hostrcb: hostrcb struct
2040 * @cfg: fabric path element struct
2041 *
2042 * Return value:
2043 * none
2044 **/
2045static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2046 struct ipr_hostrcb64_config_element *cfg)
2047{
2048 int i, j;
2049 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2050 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2051 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2052 char buffer[IPR_MAX_RES_PATH_LENGTH];
2053
2054 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2055 return;
2056
2057 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2058 if (path_type_desc[i].type != type)
2059 continue;
2060
2061 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2062 if (path_status_desc[j].status != status)
2063 continue;
2064
2065 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2066 path_status_desc[j].desc, path_type_desc[i].desc,
2067 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2068 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2069 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2070 return;
2071 }
2072 }
2073 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2074 "WWN=%08X%08X\n", cfg->type_status,
2075 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2078}
2079
2080/**
1550 * ipr_log_fabric_error - Log a fabric error. 2081 * ipr_log_fabric_error - Log a fabric error.
1551 * @ioa_cfg: ioa config struct 2082 * @ioa_cfg: ioa config struct
1552 * @hostrcb: hostrcb struct 2083 * @hostrcb: hostrcb struct
@@ -1584,6 +2115,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
1584} 2115}
1585 2116
1586/** 2117/**
2118 * ipr_log_sis64_array_error - Log a sis64 array error.
2119 * @ioa_cfg: ioa config struct
2120 * @hostrcb: hostrcb struct
2121 *
2122 * Return value:
2123 * none
2124 **/
2125static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2126 struct ipr_hostrcb *hostrcb)
2127{
2128 int i, num_entries;
2129 struct ipr_hostrcb_type_24_error *error;
2130 struct ipr_hostrcb64_array_data_entry *array_entry;
2131 char buffer[IPR_MAX_RES_PATH_LENGTH];
2132 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2133
2134 error = &hostrcb->hcam.u.error64.u.type_24_error;
2135
2136 ipr_err_separator;
2137
2138 ipr_err("RAID %s Array Configuration: %s\n",
2139 error->protection_level,
2140 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2141
2142 ipr_err_separator;
2143
2144 array_entry = error->array_member;
2145 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2146 sizeof(error->array_member));
2147
2148 for (i = 0; i < num_entries; i++, array_entry++) {
2149
2150 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2151 continue;
2152
2153 if (error->exposed_mode_adn == i)
2154 ipr_err("Exposed Array Member %d:\n", i);
2155 else
2156 ipr_err("Array Member %d:\n", i);
2157
2158 ipr_err("Array Member %d:\n", i);
2159 ipr_log_ext_vpd(&array_entry->vpd);
2160 ipr_err("Current Location: %s",
2161 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2162 ipr_err("Expected Location: %s",
2163 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2164
2165 ipr_err_separator;
2166 }
2167}
2168
2169/**
2170 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2171 * @ioa_cfg: ioa config struct
2172 * @hostrcb: hostrcb struct
2173 *
2174 * Return value:
2175 * none
2176 **/
2177static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2178 struct ipr_hostrcb *hostrcb)
2179{
2180 struct ipr_hostrcb_type_30_error *error;
2181 struct ipr_hostrcb64_fabric_desc *fabric;
2182 struct ipr_hostrcb64_config_element *cfg;
2183 int i, add_len;
2184
2185 error = &hostrcb->hcam.u.error64.u.type_30_error;
2186
2187 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2188 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2189
2190 add_len = be32_to_cpu(hostrcb->hcam.length) -
2191 (offsetof(struct ipr_hostrcb64_error, u) +
2192 offsetof(struct ipr_hostrcb_type_30_error, desc));
2193
2194 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2195 ipr_log64_fabric_path(hostrcb, fabric);
2196 for_each_fabric_cfg(fabric, cfg)
2197 ipr_log64_path_elem(hostrcb, cfg);
2198
2199 add_len -= be16_to_cpu(fabric->length);
2200 fabric = (struct ipr_hostrcb64_fabric_desc *)
2201 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2202 }
2203
2204 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2205}
2206
2207/**
1587 * ipr_log_generic_error - Log an adapter error. 2208 * ipr_log_generic_error - Log an adapter error.
1588 * @ioa_cfg: ioa config struct 2209 * @ioa_cfg: ioa config struct
1589 * @hostrcb: hostrcb struct 2210 * @hostrcb: hostrcb struct
@@ -1642,13 +2263,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1642 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) 2263 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1643 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); 2264 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1644 2265
1645 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 2266 if (ioa_cfg->sis64)
2267 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2268 else
2269 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1646 2270
1647 if (ioasc == IPR_IOASC_BUS_WAS_RESET || 2271 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1648 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { 2272 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1649 /* Tell the midlayer we had a bus reset so it will handle the UA properly */ 2273 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
1650 scsi_report_bus_reset(ioa_cfg->host, 2274 scsi_report_bus_reset(ioa_cfg->host,
1651 hostrcb->hcam.u.error.failing_dev_res_addr.bus); 2275 hostrcb->hcam.u.error.fd_res_addr.bus);
1652 } 2276 }
1653 2277
1654 error_index = ipr_get_error(ioasc); 2278 error_index = ipr_get_error(ioasc);
@@ -1696,6 +2320,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1696 case IPR_HOST_RCB_OVERLAY_ID_20: 2320 case IPR_HOST_RCB_OVERLAY_ID_20:
1697 ipr_log_fabric_error(ioa_cfg, hostrcb); 2321 ipr_log_fabric_error(ioa_cfg, hostrcb);
1698 break; 2322 break;
2323 case IPR_HOST_RCB_OVERLAY_ID_23:
2324 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2325 break;
2326 case IPR_HOST_RCB_OVERLAY_ID_24:
2327 case IPR_HOST_RCB_OVERLAY_ID_26:
2328 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2329 break;
2330 case IPR_HOST_RCB_OVERLAY_ID_30:
2331 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2332 break;
1699 case IPR_HOST_RCB_OVERLAY_ID_1: 2333 case IPR_HOST_RCB_OVERLAY_ID_1:
1700 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: 2334 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1701 default: 2335 default:
@@ -1720,7 +2354,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1721 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2355 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1722 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 2356 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1723 u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 2357 u32 fd_ioasc;
2358
2359 if (ioa_cfg->sis64)
2360 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2361 else
2362 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1724 2363
1725 list_del(&hostrcb->queue); 2364 list_del(&hostrcb->queue);
1726 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 2365 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -1845,12 +2484,14 @@ static const struct ipr_ses_table_entry *
1845ipr_find_ses_entry(struct ipr_resource_entry *res) 2484ipr_find_ses_entry(struct ipr_resource_entry *res)
1846{ 2485{
1847 int i, j, matches; 2486 int i, j, matches;
2487 struct ipr_std_inq_vpids *vpids;
1848 const struct ipr_ses_table_entry *ste = ipr_ses_table; 2488 const struct ipr_ses_table_entry *ste = ipr_ses_table;
1849 2489
1850 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { 2490 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1851 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { 2491 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1852 if (ste->compare_product_id_byte[j] == 'X') { 2492 if (ste->compare_product_id_byte[j] == 'X') {
1853 if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) 2493 vpids = &res->std_inq_data.vpids;
2494 if (vpids->product_id[j] == ste->product_id[j])
1854 matches++; 2495 matches++;
1855 else 2496 else
1856 break; 2497 break;
@@ -1885,10 +2526,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi
1885 2526
1886 /* Loop through each config table entry in the config table buffer */ 2527 /* Loop through each config table entry in the config table buffer */
1887 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 2528 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1888 if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) 2529 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1889 continue; 2530 continue;
1890 2531
1891 if (bus != res->cfgte.res_addr.bus) 2532 if (bus != res->bus)
1892 continue; 2533 continue;
1893 2534
1894 if (!(ste = ipr_find_ses_entry(res))) 2535 if (!(ste = ipr_find_ses_entry(res)))
@@ -1934,6 +2575,31 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1934} 2575}
1935 2576
1936/** 2577/**
2578 * ipr_get_sis64_dump_data_section - Dump IOA memory
2579 * @ioa_cfg: ioa config struct
2580 * @start_addr: adapter address to dump
2581 * @dest: destination kernel buffer
2582 * @length_in_words: length to dump in 4 byte words
2583 *
2584 * Return value:
2585 * 0 on success
2586 **/
2587static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2588 u32 start_addr,
2589 __be32 *dest, u32 length_in_words)
2590{
2591 int i;
2592
2593 for (i = 0; i < length_in_words; i++) {
2594 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2595 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2596 dest++;
2597 }
2598
2599 return 0;
2600}
2601
2602/**
1937 * ipr_get_ldump_data_section - Dump IOA memory 2603 * ipr_get_ldump_data_section - Dump IOA memory
1938 * @ioa_cfg: ioa config struct 2604 * @ioa_cfg: ioa config struct
1939 * @start_addr: adapter address to dump 2605 * @start_addr: adapter address to dump
@@ -1950,9 +2616,13 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1950 volatile u32 temp_pcii_reg; 2616 volatile u32 temp_pcii_reg;
1951 int i, delay = 0; 2617 int i, delay = 0;
1952 2618
2619 if (ioa_cfg->sis64)
2620 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2621 dest, length_in_words);
2622
1953 /* Write IOA interrupt reg starting LDUMP state */ 2623 /* Write IOA interrupt reg starting LDUMP state */
1954 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), 2624 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1955 ioa_cfg->regs.set_uproc_interrupt_reg); 2625 ioa_cfg->regs.set_uproc_interrupt_reg32);
1956 2626
1957 /* Wait for IO debug acknowledge */ 2627 /* Wait for IO debug acknowledge */
1958 if (ipr_wait_iodbg_ack(ioa_cfg, 2628 if (ipr_wait_iodbg_ack(ioa_cfg,
@@ -1971,7 +2641,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1971 2641
1972 /* Signal address valid - clear IOA Reset alert */ 2642 /* Signal address valid - clear IOA Reset alert */
1973 writel(IPR_UPROCI_RESET_ALERT, 2643 writel(IPR_UPROCI_RESET_ALERT,
1974 ioa_cfg->regs.clr_uproc_interrupt_reg); 2644 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1975 2645
1976 for (i = 0; i < length_in_words; i++) { 2646 for (i = 0; i < length_in_words; i++) {
1977 /* Wait for IO debug acknowledge */ 2647 /* Wait for IO debug acknowledge */
@@ -1996,10 +2666,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1996 2666
1997 /* Signal end of block transfer. Set reset alert then clear IO debug ack */ 2667 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
1998 writel(IPR_UPROCI_RESET_ALERT, 2668 writel(IPR_UPROCI_RESET_ALERT,
1999 ioa_cfg->regs.set_uproc_interrupt_reg); 2669 ioa_cfg->regs.set_uproc_interrupt_reg32);
2000 2670
2001 writel(IPR_UPROCI_IO_DEBUG_ALERT, 2671 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2002 ioa_cfg->regs.clr_uproc_interrupt_reg); 2672 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2003 2673
2004 /* Signal dump data received - Clear IO debug Ack */ 2674 /* Signal dump data received - Clear IO debug Ack */
2005 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 2675 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
@@ -2008,7 +2678,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2008 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ 2678 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2009 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { 2679 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2010 temp_pcii_reg = 2680 temp_pcii_reg =
2011 readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 2681 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2012 2682
2013 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) 2683 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2014 return 0; 2684 return 0;
@@ -2207,6 +2877,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2207 u32 num_entries, start_off, end_off; 2877 u32 num_entries, start_off, end_off;
2208 u32 bytes_to_copy, bytes_copied, rc; 2878 u32 bytes_to_copy, bytes_copied, rc;
2209 struct ipr_sdt *sdt; 2879 struct ipr_sdt *sdt;
2880 int valid = 1;
2210 int i; 2881 int i;
2211 2882
2212 ENTER; 2883 ENTER;
@@ -2220,7 +2891,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2220 2891
2221 start_addr = readl(ioa_cfg->ioa_mailbox); 2892 start_addr = readl(ioa_cfg->ioa_mailbox);
2222 2893
2223 if (!ipr_sdt_is_fmt2(start_addr)) { 2894 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2224 dev_err(&ioa_cfg->pdev->dev, 2895 dev_err(&ioa_cfg->pdev->dev,
2225 "Invalid dump table format: %lx\n", start_addr); 2896 "Invalid dump table format: %lx\n", start_addr);
2226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 2897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -2249,7 +2920,6 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2249 2920
2250 /* IOA Dump entry */ 2921 /* IOA Dump entry */
2251 ipr_init_dump_entry_hdr(&ioa_dump->hdr); 2922 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2252 ioa_dump->format = IPR_SDT_FMT2;
2253 ioa_dump->hdr.len = 0; 2923 ioa_dump->hdr.len = 0;
2254 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; 2924 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2255 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; 2925 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
@@ -2264,7 +2934,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2264 sizeof(struct ipr_sdt) / sizeof(__be32)); 2934 sizeof(struct ipr_sdt) / sizeof(__be32));
2265 2935
2266 /* Smart Dump table is ready to use and the first entry is valid */ 2936 /* Smart Dump table is ready to use and the first entry is valid */
2267 if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { 2937 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2938 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2268 dev_err(&ioa_cfg->pdev->dev, 2939 dev_err(&ioa_cfg->pdev->dev,
2269 "Dump of IOA failed. Dump table not valid: %d, %X.\n", 2940 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2270 rc, be32_to_cpu(sdt->hdr.state)); 2941 rc, be32_to_cpu(sdt->hdr.state));
@@ -2288,12 +2959,19 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2288 } 2959 }
2289 2960
2290 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { 2961 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2291 sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); 2962 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2292 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; 2963 if (ioa_cfg->sis64)
2293 end_off = be32_to_cpu(sdt->entry[i].end_offset); 2964 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2294 2965 else {
2295 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { 2966 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2296 bytes_to_copy = end_off - start_off; 2967 end_off = be32_to_cpu(sdt->entry[i].end_token);
2968
2969 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2970 bytes_to_copy = end_off - start_off;
2971 else
2972 valid = 0;
2973 }
2974 if (valid) {
2297 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { 2975 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2298 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; 2976 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2299 continue; 2977 continue;
@@ -2422,9 +3100,9 @@ restart:
2422 3100
2423 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 3101 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2424 if (res->add_to_ml) { 3102 if (res->add_to_ml) {
2425 bus = res->cfgte.res_addr.bus; 3103 bus = res->bus;
2426 target = res->cfgte.res_addr.target; 3104 target = res->target;
2427 lun = res->cfgte.res_addr.lun; 3105 lun = res->lun;
2428 res->add_to_ml = 0; 3106 res->add_to_ml = 0;
2429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3107 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2430 scsi_add_device(ioa_cfg->host, bus, target, lun); 3108 scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -2478,105 +3156,6 @@ static struct bin_attribute ipr_trace_attr = {
2478}; 3156};
2479#endif 3157#endif
2480 3158
2481static const struct {
2482 enum ipr_cache_state state;
2483 char *name;
2484} cache_state [] = {
2485 { CACHE_NONE, "none" },
2486 { CACHE_DISABLED, "disabled" },
2487 { CACHE_ENABLED, "enabled" }
2488};
2489
2490/**
2491 * ipr_show_write_caching - Show the write caching attribute
2492 * @dev: device struct
2493 * @buf: buffer
2494 *
2495 * Return value:
2496 * number of bytes printed to buffer
2497 **/
2498static ssize_t ipr_show_write_caching(struct device *dev,
2499 struct device_attribute *attr, char *buf)
2500{
2501 struct Scsi_Host *shost = class_to_shost(dev);
2502 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2503 unsigned long lock_flags = 0;
2504 int i, len = 0;
2505
2506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2507 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2508 if (cache_state[i].state == ioa_cfg->cache_state) {
2509 len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2510 break;
2511 }
2512 }
2513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2514 return len;
2515}
2516
2517
2518/**
2519 * ipr_store_write_caching - Enable/disable adapter write cache
2520 * @dev: device struct
2521 * @buf: buffer
2522 * @count: buffer size
2523 *
2524 * This function will enable/disable adapter write cache.
2525 *
2526 * Return value:
2527 * count on success / other on failure
2528 **/
2529static ssize_t ipr_store_write_caching(struct device *dev,
2530 struct device_attribute *attr,
2531 const char *buf, size_t count)
2532{
2533 struct Scsi_Host *shost = class_to_shost(dev);
2534 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2535 unsigned long lock_flags = 0;
2536 enum ipr_cache_state new_state = CACHE_INVALID;
2537 int i;
2538
2539 if (!capable(CAP_SYS_ADMIN))
2540 return -EACCES;
2541 if (ioa_cfg->cache_state == CACHE_NONE)
2542 return -EINVAL;
2543
2544 for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2545 if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2546 new_state = cache_state[i].state;
2547 break;
2548 }
2549 }
2550
2551 if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2552 return -EINVAL;
2553
2554 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2555 if (ioa_cfg->cache_state == new_state) {
2556 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2557 return count;
2558 }
2559
2560 ioa_cfg->cache_state = new_state;
2561 dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2562 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2563 if (!ioa_cfg->in_reset_reload)
2564 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2565 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2566 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2567
2568 return count;
2569}
2570
2571static struct device_attribute ipr_ioa_cache_attr = {
2572 .attr = {
2573 .name = "write_cache",
2574 .mode = S_IRUGO | S_IWUSR,
2575 },
2576 .show = ipr_show_write_caching,
2577 .store = ipr_store_write_caching
2578};
2579
2580/** 3159/**
2581 * ipr_show_fw_version - Show the firmware version 3160 * ipr_show_fw_version - Show the firmware version
2582 * @dev: class device struct 3161 * @dev: class device struct
@@ -2976,6 +3555,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2976} 3555}
2977 3556
2978/** 3557/**
3558 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3559 * @ipr_cmd: ipr command struct
3560 * @sglist: scatter/gather list
3561 *
3562 * Builds a microcode download IOA data list (IOADL).
3563 *
3564 **/
3565static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3566 struct ipr_sglist *sglist)
3567{
3568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3569 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3570 struct scatterlist *scatterlist = sglist->scatterlist;
3571 int i;
3572
3573 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3575 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3576
3577 ioarcb->ioadl_len =
3578 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3579 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3580 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3581 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3582 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3583 }
3584
3585 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3586}
3587
3588/**
2979 * ipr_build_ucode_ioadl - Build a microcode download IOADL 3589 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2980 * @ipr_cmd: ipr command struct 3590 * @ipr_cmd: ipr command struct
2981 * @sglist: scatter/gather list 3591 * @sglist: scatter/gather list
@@ -2987,14 +3597,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2987 struct ipr_sglist *sglist) 3597 struct ipr_sglist *sglist)
2988{ 3598{
2989 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 3599 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2990 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 3600 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
2991 struct scatterlist *scatterlist = sglist->scatterlist; 3601 struct scatterlist *scatterlist = sglist->scatterlist;
2992 int i; 3602 int i;
2993 3603
2994 ipr_cmd->dma_use_sg = sglist->num_dma_sg; 3604 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2995 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 3605 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2996 ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); 3606 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
2997 ioarcb->write_ioadl_len = 3607
3608 ioarcb->ioadl_len =
2998 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 3609 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2999 3610
3000 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 3611 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
@@ -3146,7 +3757,6 @@ static struct device_attribute *ipr_ioa_attrs[] = {
3146 &ipr_ioa_state_attr, 3757 &ipr_ioa_state_attr,
3147 &ipr_ioa_reset_attr, 3758 &ipr_ioa_reset_attr,
3148 &ipr_update_fw_attr, 3759 &ipr_update_fw_attr,
3149 &ipr_ioa_cache_attr,
3150 NULL, 3760 NULL,
3151}; 3761};
3152 3762
@@ -3450,7 +4060,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu
3450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4060 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3451 res = (struct ipr_resource_entry *)sdev->hostdata; 4061 res = (struct ipr_resource_entry *)sdev->hostdata;
3452 if (res) 4062 if (res)
3453 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); 4063 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
3454 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3455 return len; 4065 return len;
3456} 4066}
@@ -3463,8 +4073,43 @@ static struct device_attribute ipr_adapter_handle_attr = {
3463 .show = ipr_show_adapter_handle 4073 .show = ipr_show_adapter_handle
3464}; 4074};
3465 4075
4076/**
4077 * ipr_show_resource_path - Show the resource path for this device.
4078 * @dev: device struct
4079 * @buf: buffer
4080 *
4081 * Return value:
4082 * number of bytes printed to buffer
4083 **/
4084static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4085{
4086 struct scsi_device *sdev = to_scsi_device(dev);
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4088 struct ipr_resource_entry *res;
4089 unsigned long lock_flags = 0;
4090 ssize_t len = -ENXIO;
4091 char buffer[IPR_MAX_RES_PATH_LENGTH];
4092
4093 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4094 res = (struct ipr_resource_entry *)sdev->hostdata;
4095 if (res)
4096 len = snprintf(buf, PAGE_SIZE, "%s\n",
4097 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4099 return len;
4100}
4101
4102static struct device_attribute ipr_resource_path_attr = {
4103 .attr = {
4104 .name = "resource_path",
4105 .mode = S_IRUSR,
4106 },
4107 .show = ipr_show_resource_path
4108};
4109
3466static struct device_attribute *ipr_dev_attrs[] = { 4110static struct device_attribute *ipr_dev_attrs[] = {
3467 &ipr_adapter_handle_attr, 4111 &ipr_adapter_handle_attr,
4112 &ipr_resource_path_attr,
3468 NULL, 4113 NULL,
3469}; 4114};
3470 4115
@@ -3517,9 +4162,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
3517 struct ipr_resource_entry *res; 4162 struct ipr_resource_entry *res;
3518 4163
3519 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4164 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3520 if ((res->cfgte.res_addr.bus == starget->channel) && 4165 if ((res->bus == starget->channel) &&
3521 (res->cfgte.res_addr.target == starget->id) && 4166 (res->target == starget->id) &&
3522 (res->cfgte.res_addr.lun == 0)) { 4167 (res->lun == 0)) {
3523 return res; 4168 return res;
3524 } 4169 }
3525 } 4170 }
@@ -3589,6 +4234,17 @@ static int ipr_target_alloc(struct scsi_target *starget)
3589static void ipr_target_destroy(struct scsi_target *starget) 4234static void ipr_target_destroy(struct scsi_target *starget)
3590{ 4235{
3591 struct ipr_sata_port *sata_port = starget->hostdata; 4236 struct ipr_sata_port *sata_port = starget->hostdata;
4237 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4238 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4239
4240 if (ioa_cfg->sis64) {
4241 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4242 clear_bit(starget->id, ioa_cfg->array_ids);
4243 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4244 clear_bit(starget->id, ioa_cfg->vset_ids);
4245 else if (starget->channel == 0)
4246 clear_bit(starget->id, ioa_cfg->target_ids);
4247 }
3592 4248
3593 if (sata_port) { 4249 if (sata_port) {
3594 starget->hostdata = NULL; 4250 starget->hostdata = NULL;
@@ -3610,9 +4266,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
3610 struct ipr_resource_entry *res; 4266 struct ipr_resource_entry *res;
3611 4267
3612 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4268 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3613 if ((res->cfgte.res_addr.bus == sdev->channel) && 4269 if ((res->bus == sdev->channel) &&
3614 (res->cfgte.res_addr.target == sdev->id) && 4270 (res->target == sdev->id) &&
3615 (res->cfgte.res_addr.lun == sdev->lun)) 4271 (res->lun == sdev->lun))
3616 return res; 4272 return res;
3617 } 4273 }
3618 4274
@@ -3661,6 +4317,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3661 struct ipr_resource_entry *res; 4317 struct ipr_resource_entry *res;
3662 struct ata_port *ap = NULL; 4318 struct ata_port *ap = NULL;
3663 unsigned long lock_flags = 0; 4319 unsigned long lock_flags = 0;
4320 char buffer[IPR_MAX_RES_PATH_LENGTH];
3664 4321
3665 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3666 res = sdev->hostdata; 4323 res = sdev->hostdata;
@@ -3687,6 +4344,9 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3687 ata_sas_slave_configure(sdev, ap); 4344 ata_sas_slave_configure(sdev, ap);
3688 } else 4345 } else
3689 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4346 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4347 if (ioa_cfg->sis64)
4348 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4349 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
3690 return 0; 4350 return 0;
3691 } 4351 }
3692 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3828,14 +4488,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3828 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4488 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3829 ioarcb = &ipr_cmd->ioarcb; 4489 ioarcb = &ipr_cmd->ioarcb;
3830 cmd_pkt = &ioarcb->cmd_pkt; 4490 cmd_pkt = &ioarcb->cmd_pkt;
3831 regs = &ioarcb->add_data.u.regs;
3832 4491
3833 ioarcb->res_handle = res->cfgte.res_handle; 4492 if (ipr_cmd->ioa_cfg->sis64) {
4493 regs = &ipr_cmd->i.ata_ioadl.regs;
4494 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4495 } else
4496 regs = &ioarcb->u.add_data.u.regs;
4497
4498 ioarcb->res_handle = res->res_handle;
3834 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4499 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3835 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; 4500 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3836 if (ipr_is_gata(res)) { 4501 if (ipr_is_gata(res)) {
3837 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; 4502 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
3838 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); 4503 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
3839 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 4504 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
3840 } 4505 }
3841 4506
@@ -3880,19 +4545,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
3880 res = sata_port->res; 4545 res = sata_port->res;
3881 if (res) { 4546 if (res) {
3882 rc = ipr_device_reset(ioa_cfg, res); 4547 rc = ipr_device_reset(ioa_cfg, res);
3883 switch(res->cfgte.proto) { 4548 *classes = res->ata_class;
3884 case IPR_PROTO_SATA:
3885 case IPR_PROTO_SAS_STP:
3886 *classes = ATA_DEV_ATA;
3887 break;
3888 case IPR_PROTO_SATA_ATAPI:
3889 case IPR_PROTO_SAS_STP_ATAPI:
3890 *classes = ATA_DEV_ATAPI;
3891 break;
3892 default:
3893 *classes = ATA_DEV_UNKNOWN;
3894 break;
3895 };
3896 } 4549 }
3897 4550
3898 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3937,7 +4590,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3937 return FAILED; 4590 return FAILED;
3938 4591
3939 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4592 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3940 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4593 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3941 if (ipr_cmd->scsi_cmd) 4594 if (ipr_cmd->scsi_cmd)
3942 ipr_cmd->done = ipr_scsi_eh_done; 4595 ipr_cmd->done = ipr_scsi_eh_done;
3943 if (ipr_cmd->qc) 4596 if (ipr_cmd->qc)
@@ -3959,7 +4612,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3959 spin_lock_irq(scsi_cmd->device->host->host_lock); 4612 spin_lock_irq(scsi_cmd->device->host->host_lock);
3960 4613
3961 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { 4614 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3962 if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { 4615 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
3963 rc = -EIO; 4616 rc = -EIO;
3964 break; 4617 break;
3965 } 4618 }
@@ -3998,13 +4651,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3998 struct ipr_resource_entry *res; 4651 struct ipr_resource_entry *res;
3999 4652
4000 ENTER; 4653 ENTER;
4001 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4654 if (!ioa_cfg->sis64)
4002 if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, 4655 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4003 sizeof(res->cfgte.res_handle))) { 4656 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4004 scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); 4657 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4005 break; 4658 break;
4659 }
4006 } 4660 }
4007 }
4008 4661
4009 /* 4662 /*
4010 * If abort has not completed, indicate the reset has, else call the 4663 * If abort has not completed, indicate the reset has, else call the
@@ -4102,7 +4755,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4102 return SUCCESS; 4755 return SUCCESS;
4103 4756
4104 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 4757 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4105 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 4758 ipr_cmd->ioarcb.res_handle = res->res_handle;
4106 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 4759 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4107 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; 4760 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4108 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; 4761 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -4239,11 +4892,29 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4239 return IRQ_NONE; 4892 return IRQ_NONE;
4240 } 4893 }
4241 4894
4242 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 4895 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4243 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4896 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4244 4897
4245 /* If an interrupt on the adapter did not occur, ignore it */ 4898 /* If an interrupt on the adapter did not occur, ignore it.
4899 * Or in the case of SIS 64, check for a stage change interrupt.
4900 */
4246 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { 4901 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4902 if (ioa_cfg->sis64) {
4903 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4904 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4905 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4906
4907 /* clear stage change */
4908 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4909 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4910 list_del(&ioa_cfg->reset_cmd->queue);
4911 del_timer(&ioa_cfg->reset_cmd->timer);
4912 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4913 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4914 return IRQ_HANDLED;
4915 }
4916 }
4917
4247 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4248 return IRQ_NONE; 4919 return IRQ_NONE;
4249 } 4920 }
@@ -4286,8 +4957,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4286 if (ipr_cmd != NULL) { 4957 if (ipr_cmd != NULL) {
4287 /* Clear the PCI interrupt */ 4958 /* Clear the PCI interrupt */
4288 do { 4959 do {
4289 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4960 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4290 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4961 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4291 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4962 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4292 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4963 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4293 4964
@@ -4309,6 +4980,53 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4309} 4980}
4310 4981
4311/** 4982/**
4983 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4984 * @ioa_cfg: ioa config struct
4985 * @ipr_cmd: ipr command struct
4986 *
4987 * Return value:
4988 * 0 on success / -1 on failure
4989 **/
4990static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4991 struct ipr_cmnd *ipr_cmd)
4992{
4993 int i, nseg;
4994 struct scatterlist *sg;
4995 u32 length;
4996 u32 ioadl_flags = 0;
4997 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4998 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4999 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5000
5001 length = scsi_bufflen(scsi_cmd);
5002 if (!length)
5003 return 0;
5004
5005 nseg = scsi_dma_map(scsi_cmd);
5006 if (nseg < 0) {
5007 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5008 return -1;
5009 }
5010
5011 ipr_cmd->dma_use_sg = nseg;
5012
5013 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5014 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5015 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5016 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5017 ioadl_flags = IPR_IOADL_FLAGS_READ;
5018
5019 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5020 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5021 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5022 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5023 }
5024
5025 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5026 return 0;
5027}
5028
5029/**
4312 * ipr_build_ioadl - Build a scatter/gather list and map the buffer 5030 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4313 * @ioa_cfg: ioa config struct 5031 * @ioa_cfg: ioa config struct
4314 * @ipr_cmd: ipr command struct 5032 * @ipr_cmd: ipr command struct
@@ -4325,7 +5043,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4325 u32 ioadl_flags = 0; 5043 u32 ioadl_flags = 0;
4326 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5044 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4327 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5045 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4328 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5046 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
4329 5047
4330 length = scsi_bufflen(scsi_cmd); 5048 length = scsi_bufflen(scsi_cmd);
4331 if (!length) 5049 if (!length)
@@ -4342,8 +5060,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4342 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5060 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4343 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5061 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4344 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5062 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4345 ioarcb->write_data_transfer_length = cpu_to_be32(length); 5063 ioarcb->data_transfer_length = cpu_to_be32(length);
4346 ioarcb->write_ioadl_len = 5064 ioarcb->ioadl_len =
4347 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5065 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4348 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { 5066 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4349 ioadl_flags = IPR_IOADL_FLAGS_READ; 5067 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -4352,11 +5070,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4352 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5070 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4353 } 5071 }
4354 5072
4355 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 5073 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
4356 ioadl = ioarcb->add_data.u.ioadl; 5074 ioadl = ioarcb->u.add_data.u.ioadl;
4357 ioarcb->write_ioadl_addr = 5075 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
4358 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + 5076 offsetof(struct ipr_ioarcb, u.add_data));
4359 offsetof(struct ipr_ioarcb, add_data));
4360 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5077 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4361 } 5078 }
4362 5079
@@ -4446,18 +5163,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
4446{ 5163{
4447 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5164 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4448 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5165 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4449 dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); 5166 dma_addr_t dma_addr = ipr_cmd->dma_addr;
4450 5167
4451 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5168 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
4452 ioarcb->write_data_transfer_length = 0; 5169 ioarcb->data_transfer_length = 0;
4453 ioarcb->read_data_transfer_length = 0; 5170 ioarcb->read_data_transfer_length = 0;
4454 ioarcb->write_ioadl_len = 0; 5171 ioarcb->ioadl_len = 0;
4455 ioarcb->read_ioadl_len = 0; 5172 ioarcb->read_ioadl_len = 0;
4456 ioasa->ioasc = 0; 5173 ioasa->ioasc = 0;
4457 ioasa->residual_data_len = 0; 5174 ioasa->residual_data_len = 0;
4458 ioarcb->write_ioadl_addr = 5175
4459 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 5176 if (ipr_cmd->ioa_cfg->sis64)
4460 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 5177 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5178 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5179 else {
5180 ioarcb->write_ioadl_addr =
5181 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5182 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5183 }
4461} 5184}
4462 5185
4463/** 5186/**
@@ -4489,15 +5212,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
4489 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5212 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4490 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); 5213 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
4491 5214
4492 ipr_cmd->ioadl[0].flags_and_data_len = 5215 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
4493 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); 5216 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
4494 ipr_cmd->ioadl[0].address =
4495 cpu_to_be32(ipr_cmd->sense_buffer_dma);
4496
4497 ipr_cmd->ioarcb.read_ioadl_len =
4498 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4499 ipr_cmd->ioarcb.read_data_transfer_length =
4500 cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
4501 5217
4502 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, 5218 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
4503 IPR_REQUEST_SENSE_TIMEOUT * 2); 5219 IPR_REQUEST_SENSE_TIMEOUT * 2);
@@ -4893,9 +5609,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4893 5609
4894 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5610 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4895 ipr_cmd->scsi_cmd = scsi_cmd; 5611 ipr_cmd->scsi_cmd = scsi_cmd;
4896 ioarcb->res_handle = res->cfgte.res_handle; 5612 ioarcb->res_handle = res->res_handle;
4897 ipr_cmd->done = ipr_scsi_done; 5613 ipr_cmd->done = ipr_scsi_done;
4898 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5614 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
4899 5615
4900 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5616 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4901 if (scsi_cmd->underflow == 0) 5617 if (scsi_cmd->underflow == 0)
@@ -4916,13 +5632,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4916 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 5632 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4917 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5633 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4918 5634
4919 if (likely(rc == 0)) 5635 if (likely(rc == 0)) {
4920 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 5636 if (ioa_cfg->sis64)
5637 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5638 else
5639 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5640 }
4921 5641
4922 if (likely(rc == 0)) { 5642 if (likely(rc == 0)) {
4923 mb(); 5643 mb();
4924 writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), 5644 ipr_send_command(ipr_cmd);
4925 ioa_cfg->regs.ioarrin_reg);
4926 } else { 5645 } else {
4927 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5646 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4928 return SCSI_MLQUEUE_HOST_BUSY; 5647 return SCSI_MLQUEUE_HOST_BUSY;
@@ -5035,20 +5754,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
5035 goto out_unlock; 5754 goto out_unlock;
5036 } 5755 }
5037 5756
5038 switch(res->cfgte.proto) { 5757 ap->link.device[0].class = res->ata_class;
5039 case IPR_PROTO_SATA: 5758 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5040 case IPR_PROTO_SAS_STP:
5041 ap->link.device[0].class = ATA_DEV_ATA;
5042 break;
5043 case IPR_PROTO_SATA_ATAPI:
5044 case IPR_PROTO_SAS_STP_ATAPI:
5045 ap->link.device[0].class = ATA_DEV_ATAPI;
5046 break;
5047 default:
5048 ap->link.device[0].class = ATA_DEV_UNKNOWN;
5049 ata_port_disable(ap); 5759 ata_port_disable(ap);
5050 break;
5051 };
5052 5760
5053out_unlock: 5761out_unlock:
5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); 5762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5134,8 +5842,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5134 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5842 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5135 5843
5136 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5844 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5137 scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, 5845 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5138 res->cfgte.res_addr.target);
5139 5846
5140 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5847 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5141 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5848 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5146,6 +5853,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5146} 5853}
5147 5854
5148/** 5855/**
5856 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5857 * @ipr_cmd: ipr command struct
5858 * @qc: ATA queued command
5859 *
5860 **/
5861static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5862 struct ata_queued_cmd *qc)
5863{
5864 u32 ioadl_flags = 0;
5865 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5866 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5867 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5868 int len = qc->nbytes;
5869 struct scatterlist *sg;
5870 unsigned int si;
5871 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5872
5873 if (len == 0)
5874 return;
5875
5876 if (qc->dma_dir == DMA_TO_DEVICE) {
5877 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5878 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5879 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5880 ioadl_flags = IPR_IOADL_FLAGS_READ;
5881
5882 ioarcb->data_transfer_length = cpu_to_be32(len);
5883 ioarcb->ioadl_len =
5884 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5885 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5886 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5887
5888 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5889 ioadl64->flags = cpu_to_be32(ioadl_flags);
5890 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5891 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5892
5893 last_ioadl64 = ioadl64;
5894 ioadl64++;
5895 }
5896
5897 if (likely(last_ioadl64))
5898 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5899}
5900
5901/**
5149 * ipr_build_ata_ioadl - Build an ATA scatter/gather list 5902 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5150 * @ipr_cmd: ipr command struct 5903 * @ipr_cmd: ipr command struct
5151 * @qc: ATA queued command 5904 * @qc: ATA queued command
@@ -5156,7 +5909,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5156{ 5909{
5157 u32 ioadl_flags = 0; 5910 u32 ioadl_flags = 0;
5158 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5911 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5159 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5912 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5160 struct ipr_ioadl_desc *last_ioadl = NULL; 5913 struct ipr_ioadl_desc *last_ioadl = NULL;
5161 int len = qc->nbytes; 5914 int len = qc->nbytes;
5162 struct scatterlist *sg; 5915 struct scatterlist *sg;
@@ -5168,8 +5921,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5168 if (qc->dma_dir == DMA_TO_DEVICE) { 5921 if (qc->dma_dir == DMA_TO_DEVICE) {
5169 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5922 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5170 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5923 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5171 ioarcb->write_data_transfer_length = cpu_to_be32(len); 5924 ioarcb->data_transfer_length = cpu_to_be32(len);
5172 ioarcb->write_ioadl_len = 5925 ioarcb->ioadl_len =
5173 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); 5926 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5174 } else if (qc->dma_dir == DMA_FROM_DEVICE) { 5927 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5175 ioadl_flags = IPR_IOADL_FLAGS_READ; 5928 ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -5212,25 +5965,34 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5212 5965
5213 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5966 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5214 ioarcb = &ipr_cmd->ioarcb; 5967 ioarcb = &ipr_cmd->ioarcb;
5215 regs = &ioarcb->add_data.u.regs;
5216 5968
5217 memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); 5969 if (ioa_cfg->sis64) {
5218 ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); 5970 regs = &ipr_cmd->i.ata_ioadl.regs;
5971 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5972 } else
5973 regs = &ioarcb->u.add_data.u.regs;
5974
5975 memset(regs, 0, sizeof(*regs));
5976 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5219 5977
5220 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); 5978 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5221 ipr_cmd->qc = qc; 5979 ipr_cmd->qc = qc;
5222 ipr_cmd->done = ipr_sata_done; 5980 ipr_cmd->done = ipr_sata_done;
5223 ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; 5981 ipr_cmd->ioarcb.res_handle = res->res_handle;
5224 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5982 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5225 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5983 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5226 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5227 ipr_cmd->dma_use_sg = qc->n_elem; 5985 ipr_cmd->dma_use_sg = qc->n_elem;
5228 5986
5229 ipr_build_ata_ioadl(ipr_cmd, qc); 5987 if (ioa_cfg->sis64)
5988 ipr_build_ata_ioadl64(ipr_cmd, qc);
5989 else
5990 ipr_build_ata_ioadl(ipr_cmd, qc);
5991
5230 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5992 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5231 ipr_copy_sata_tf(regs, &qc->tf); 5993 ipr_copy_sata_tf(regs, &qc->tf);
5232 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); 5994 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5233 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); 5995 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5234 5996
5235 switch (qc->tf.protocol) { 5997 switch (qc->tf.protocol) {
5236 case ATA_PROT_NODATA: 5998 case ATA_PROT_NODATA:
@@ -5257,8 +6019,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5257 } 6019 }
5258 6020
5259 mb(); 6021 mb();
5260 writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), 6022
5261 ioa_cfg->regs.ioarrin_reg); 6023 ipr_send_command(ipr_cmd);
6024
5262 return 0; 6025 return 0;
5263} 6026}
5264 6027
@@ -5459,7 +6222,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
5459 * ipr_set_supported_devs - Send Set Supported Devices for a device 6222 * ipr_set_supported_devs - Send Set Supported Devices for a device
5460 * @ipr_cmd: ipr command struct 6223 * @ipr_cmd: ipr command struct
5461 * 6224 *
5462 * This function send a Set Supported Devices to the adapter 6225 * This function sends a Set Supported Devices to the adapter
5463 * 6226 *
5464 * Return value: 6227 * Return value:
5465 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN 6228 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
@@ -5468,7 +6231,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5468{ 6231{
5469 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5470 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; 6233 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
5471 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5472 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6234 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5473 struct ipr_resource_entry *res = ipr_cmd->u.res; 6235 struct ipr_resource_entry *res = ipr_cmd->u.res;
5474 6236
@@ -5479,28 +6241,28 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5479 continue; 6241 continue;
5480 6242
5481 ipr_cmd->u.res = res; 6243 ipr_cmd->u.res = res;
5482 ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); 6244 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
5483 6245
5484 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6246 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5485 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 6247 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5486 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5487 6249
5488 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; 6250 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6251 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
5489 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; 6252 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
5490 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; 6253 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
5491 6254
5492 ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | 6255 ipr_init_ioadl(ipr_cmd,
5493 sizeof(struct ipr_supported_device)); 6256 ioa_cfg->vpd_cbs_dma +
5494 ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + 6257 offsetof(struct ipr_misc_cbs, supp_dev),
5495 offsetof(struct ipr_misc_cbs, supp_dev)); 6258 sizeof(struct ipr_supported_device),
5496 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6259 IPR_IOADL_FLAGS_WRITE_LAST);
5497 ioarcb->write_data_transfer_length =
5498 cpu_to_be32(sizeof(struct ipr_supported_device));
5499 6260
5500 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 6261 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5501 IPR_SET_SUP_DEVICE_TIMEOUT); 6262 IPR_SET_SUP_DEVICE_TIMEOUT);
5502 6263
5503 ipr_cmd->job_step = ipr_set_supported_devs; 6264 if (!ioa_cfg->sis64)
6265 ipr_cmd->job_step = ipr_set_supported_devs;
5504 return IPR_RC_JOB_RETURN; 6266 return IPR_RC_JOB_RETURN;
5505 } 6267 }
5506 6268
@@ -5508,36 +6270,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
5508} 6270}
5509 6271
5510/** 6272/**
5511 * ipr_setup_write_cache - Disable write cache if needed
5512 * @ipr_cmd: ipr command struct
5513 *
5514 * This function sets up adapters write cache to desired setting
5515 *
5516 * Return value:
5517 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5518 **/
5519static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
5520{
5521 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5522
5523 ipr_cmd->job_step = ipr_set_supported_devs;
5524 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
5525 struct ipr_resource_entry, queue);
5526
5527 if (ioa_cfg->cache_state != CACHE_DISABLED)
5528 return IPR_RC_JOB_CONTINUE;
5529
5530 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5531 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5532 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5533 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
5534
5535 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5536
5537 return IPR_RC_JOB_RETURN;
5538}
5539
5540/**
5541 * ipr_get_mode_page - Locate specified mode page 6273 * ipr_get_mode_page - Locate specified mode page
5542 * @mode_pages: mode page buffer 6274 * @mode_pages: mode page buffer
5543 * @page_code: page code to find 6275 * @page_code: page code to find
@@ -5695,10 +6427,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
5695 * none 6427 * none
5696 **/ 6428 **/
5697static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, 6429static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5698 __be32 res_handle, u8 parm, u32 dma_addr, 6430 __be32 res_handle, u8 parm,
5699 u8 xfer_len) 6431 dma_addr_t dma_addr, u8 xfer_len)
5700{ 6432{
5701 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5702 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6433 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5703 6434
5704 ioarcb->res_handle = res_handle; 6435 ioarcb->res_handle = res_handle;
@@ -5708,11 +6439,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
5708 ioarcb->cmd_pkt.cdb[1] = parm; 6439 ioarcb->cmd_pkt.cdb[1] = parm;
5709 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6440 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5710 6441
5711 ioadl->flags_and_data_len = 6442 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
5712 cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
5713 ioadl->address = cpu_to_be32(dma_addr);
5714 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5715 ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
5716} 6443}
5717 6444
5718/** 6445/**
@@ -5742,7 +6469,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5742 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), 6469 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
5743 length); 6470 length);
5744 6471
5745 ipr_cmd->job_step = ipr_setup_write_cache; 6472 ipr_cmd->job_step = ipr_set_supported_devs;
6473 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6474 struct ipr_resource_entry, queue);
5746 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6475 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5747 6476
5748 LEAVE; 6477 LEAVE;
@@ -5762,9 +6491,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
5762 **/ 6491 **/
5763static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, 6492static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5764 __be32 res_handle, 6493 __be32 res_handle,
5765 u8 parm, u32 dma_addr, u8 xfer_len) 6494 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
5766{ 6495{
5767 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5768 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6496 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5769 6497
5770 ioarcb->res_handle = res_handle; 6498 ioarcb->res_handle = res_handle;
@@ -5773,11 +6501,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
5773 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6501 ioarcb->cmd_pkt.cdb[4] = xfer_len;
5774 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6502 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5775 6503
5776 ioadl->flags_and_data_len = 6504 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
5777 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5778 ioadl->address = cpu_to_be32(dma_addr);
5779 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5780 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5781} 6505}
5782 6506
5783/** 6507/**
@@ -5815,10 +6539,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
5815 **/ 6539 **/
5816static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6540static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
5817{ 6541{
6542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5818 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6543 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5819 6544
5820 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6545 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
5821 ipr_cmd->job_step = ipr_setup_write_cache; 6546 ipr_cmd->job_step = ipr_set_supported_devs;
6547 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6548 struct ipr_resource_entry, queue);
5822 return IPR_RC_JOB_CONTINUE; 6549 return IPR_RC_JOB_CONTINUE;
5823 } 6550 }
5824 6551
@@ -5958,24 +6685,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5958{ 6685{
5959 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5960 struct ipr_resource_entry *res, *temp; 6687 struct ipr_resource_entry *res, *temp;
5961 struct ipr_config_table_entry *cfgte; 6688 struct ipr_config_table_entry_wrapper cfgtew;
5962 int found, i; 6689 int entries, found, flag, i;
5963 LIST_HEAD(old_res); 6690 LIST_HEAD(old_res);
5964 6691
5965 ENTER; 6692 ENTER;
5966 if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) 6693 if (ioa_cfg->sis64)
6694 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6695 else
6696 flag = ioa_cfg->u.cfg_table->hdr.flags;
6697
6698 if (flag & IPR_UCODE_DOWNLOAD_REQ)
5967 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); 6699 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
5968 6700
5969 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) 6701 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
5970 list_move_tail(&res->queue, &old_res); 6702 list_move_tail(&res->queue, &old_res);
5971 6703
5972 for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { 6704 if (ioa_cfg->sis64)
5973 cfgte = &ioa_cfg->cfg_table->dev[i]; 6705 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6706 else
6707 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6708
6709 for (i = 0; i < entries; i++) {
6710 if (ioa_cfg->sis64)
6711 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6712 else
6713 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
5974 found = 0; 6714 found = 0;
5975 6715
5976 list_for_each_entry_safe(res, temp, &old_res, queue) { 6716 list_for_each_entry_safe(res, temp, &old_res, queue) {
5977 if (!memcmp(&res->cfgte.res_addr, 6717 if (ipr_is_same_device(res, &cfgtew)) {
5978 &cfgte->res_addr, sizeof(cfgte->res_addr))) {
5979 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6718 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5980 found = 1; 6719 found = 1;
5981 break; 6720 break;
@@ -5992,24 +6731,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
5992 res = list_entry(ioa_cfg->free_res_q.next, 6731 res = list_entry(ioa_cfg->free_res_q.next,
5993 struct ipr_resource_entry, queue); 6732 struct ipr_resource_entry, queue);
5994 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6733 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5995 ipr_init_res_entry(res); 6734 ipr_init_res_entry(res, &cfgtew);
5996 res->add_to_ml = 1; 6735 res->add_to_ml = 1;
5997 } 6736 }
5998 6737
5999 if (found) 6738 if (found)
6000 memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); 6739 ipr_update_res_entry(res, &cfgtew);
6001 } 6740 }
6002 6741
6003 list_for_each_entry_safe(res, temp, &old_res, queue) { 6742 list_for_each_entry_safe(res, temp, &old_res, queue) {
6004 if (res->sdev) { 6743 if (res->sdev) {
6005 res->del_from_ml = 1; 6744 res->del_from_ml = 1;
6006 res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; 6745 res->res_handle = IPR_INVALID_RES_HANDLE;
6007 list_move_tail(&res->queue, &ioa_cfg->used_res_q); 6746 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6008 } else {
6009 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6010 } 6747 }
6011 } 6748 }
6012 6749
6750 list_for_each_entry_safe(res, temp, &old_res, queue) {
6751 ipr_clear_res_target(res);
6752 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6753 }
6754
6013 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) 6755 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6014 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; 6756 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6015 else 6757 else
@@ -6033,7 +6775,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6033{ 6775{
6034 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6035 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6777 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6036 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6037 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; 6778 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6038 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; 6779 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6039 6780
@@ -6047,16 +6788,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6047 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6788 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6048 6789
6049 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6790 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6050 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; 6791 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6051 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; 6792 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6052 6793
6053 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6794 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6054 ioarcb->read_data_transfer_length = 6795 IPR_IOADL_FLAGS_READ_LAST);
6055 cpu_to_be32(sizeof(struct ipr_config_table));
6056
6057 ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
6058 ioadl->flags_and_data_len =
6059 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
6060 6796
6061 ipr_cmd->job_step = ipr_init_res_table; 6797 ipr_cmd->job_step = ipr_init_res_table;
6062 6798
@@ -6076,10 +6812,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6076 * none 6812 * none
6077 **/ 6813 **/
6078static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, 6814static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6079 u32 dma_addr, u8 xfer_len) 6815 dma_addr_t dma_addr, u8 xfer_len)
6080{ 6816{
6081 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6817 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6082 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
6083 6818
6084 ENTER; 6819 ENTER;
6085 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; 6820 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
@@ -6090,12 +6825,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6090 ioarcb->cmd_pkt.cdb[2] = page; 6825 ioarcb->cmd_pkt.cdb[2] = page;
6091 ioarcb->cmd_pkt.cdb[4] = xfer_len; 6826 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6092 6827
6093 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); 6828 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6094 ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
6095
6096 ioadl->address = cpu_to_be32(dma_addr);
6097 ioadl->flags_and_data_len =
6098 cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
6099 6829
6100 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 6830 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6101 LEAVE; 6831 LEAVE;
@@ -6166,13 +6896,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6166static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) 6896static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6167{ 6897{
6168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6169 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6170 6899
6171 ENTER; 6900 ENTER;
6172 6901
6173 if (!ipr_inquiry_page_supported(page0, 1))
6174 ioa_cfg->cache_state = CACHE_NONE;
6175
6176 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; 6902 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6177 6903
6178 ipr_ioafp_inquiry(ipr_cmd, 1, 3, 6904 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -6240,7 +6966,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6240} 6966}
6241 6967
6242/** 6968/**
6243 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. 6969 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6244 * @ipr_cmd: ipr command struct 6970 * @ipr_cmd: ipr command struct
6245 * 6971 *
6246 * This function send an Identify Host Request Response Queue 6972 * This function send an Identify Host Request Response Queue
@@ -6249,7 +6975,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6249 * Return value: 6975 * Return value:
6250 * IPR_RC_JOB_RETURN 6976 * IPR_RC_JOB_RETURN
6251 **/ 6977 **/
6252static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) 6978static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6253{ 6979{
6254 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6255 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 6981 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
@@ -6261,19 +6987,32 @@ static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
6261 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6987 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6262 6988
6263 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 6989 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6990 if (ioa_cfg->sis64)
6991 ioarcb->cmd_pkt.cdb[1] = 0x1;
6264 ioarcb->cmd_pkt.cdb[2] = 6992 ioarcb->cmd_pkt.cdb[2] =
6265 ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; 6993 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6266 ioarcb->cmd_pkt.cdb[3] = 6994 ioarcb->cmd_pkt.cdb[3] =
6267 ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; 6995 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6268 ioarcb->cmd_pkt.cdb[4] = 6996 ioarcb->cmd_pkt.cdb[4] =
6269 ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; 6997 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6270 ioarcb->cmd_pkt.cdb[5] = 6998 ioarcb->cmd_pkt.cdb[5] =
6271 ((u32) ioa_cfg->host_rrq_dma) & 0xff; 6999 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
6272 ioarcb->cmd_pkt.cdb[7] = 7000 ioarcb->cmd_pkt.cdb[7] =
6273 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; 7001 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6274 ioarcb->cmd_pkt.cdb[8] = 7002 ioarcb->cmd_pkt.cdb[8] =
6275 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; 7003 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
6276 7004
7005 if (ioa_cfg->sis64) {
7006 ioarcb->cmd_pkt.cdb[10] =
7007 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7008 ioarcb->cmd_pkt.cdb[11] =
7009 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7010 ioarcb->cmd_pkt.cdb[12] =
7011 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7012 ioarcb->cmd_pkt.cdb[13] =
7013 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7014 }
7015
6277 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 7016 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6278 7017
6279 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); 7018 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
@@ -6354,7 +7093,58 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
6354 ioa_cfg->toggle_bit = 1; 7093 ioa_cfg->toggle_bit = 1;
6355 7094
6356 /* Zero out config table */ 7095 /* Zero out config table */
6357 memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); 7096 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7097}
7098
7099/**
7100 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7101 * @ipr_cmd: ipr command struct
7102 *
7103 * Return value:
7104 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7105 **/
7106static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7107{
7108 unsigned long stage, stage_time;
7109 u32 feedback;
7110 volatile u32 int_reg;
7111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7112 u64 maskval = 0;
7113
7114 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7115 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7116 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7117
7118 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7119
7120 /* sanity check the stage_time value */
7121 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7122 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7123 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7124 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7125
7126 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7127 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7128 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7129 stage_time = ioa_cfg->transop_timeout;
7130 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7131 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7132 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7133 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7134 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7135 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7136 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7137 return IPR_RC_JOB_CONTINUE;
7138 }
7139
7140 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7141 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7142 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7143 ipr_cmd->done = ipr_reset_ioa_job;
7144 add_timer(&ipr_cmd->timer);
7145 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7146
7147 return IPR_RC_JOB_RETURN;
6358} 7148}
6359 7149
6360/** 7150/**
@@ -6373,7 +7163,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6373 volatile u32 int_reg; 7163 volatile u32 int_reg;
6374 7164
6375 ENTER; 7165 ENTER;
6376 ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; 7166 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
6377 ipr_init_ioa_mem(ioa_cfg); 7167 ipr_init_ioa_mem(ioa_cfg);
6378 7168
6379 ioa_cfg->allow_interrupts = 1; 7169 ioa_cfg->allow_interrupts = 1;
@@ -6381,19 +7171,27 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
6381 7171
6382 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7172 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
6383 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7173 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
6384 ioa_cfg->regs.clr_interrupt_mask_reg); 7174 ioa_cfg->regs.clr_interrupt_mask_reg32);
6385 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7175 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6386 return IPR_RC_JOB_CONTINUE; 7176 return IPR_RC_JOB_CONTINUE;
6387 } 7177 }
6388 7178
6389 /* Enable destructive diagnostics on IOA */ 7179 /* Enable destructive diagnostics on IOA */
6390 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); 7180 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7181
7182 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7183 if (ioa_cfg->sis64)
7184 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
6391 7185
6392 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
6393 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7186 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6394 7187
6395 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); 7188 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
6396 7189
7190 if (ioa_cfg->sis64) {
7191 ipr_cmd->job_step = ipr_reset_next_stage;
7192 return IPR_RC_JOB_CONTINUE;
7193 }
7194
6397 ipr_cmd->timer.data = (unsigned long) ipr_cmd; 7195 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
6398 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 7196 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
6399 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 7197 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
@@ -6463,7 +7261,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6463 7261
6464 mailbox = readl(ioa_cfg->ioa_mailbox); 7262 mailbox = readl(ioa_cfg->ioa_mailbox);
6465 7263
6466 if (!ipr_sdt_is_fmt2(mailbox)) { 7264 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
6467 ipr_unit_check_no_data(ioa_cfg); 7265 ipr_unit_check_no_data(ioa_cfg);
6468 return; 7266 return;
6469 } 7267 }
@@ -6472,15 +7270,20 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6472 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, 7270 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
6473 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); 7271 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
6474 7272
6475 if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || 7273 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
6476 !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { 7274 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7275 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
6477 ipr_unit_check_no_data(ioa_cfg); 7276 ipr_unit_check_no_data(ioa_cfg);
6478 return; 7277 return;
6479 } 7278 }
6480 7279
6481 /* Find length of the first sdt entry (UC buffer) */ 7280 /* Find length of the first sdt entry (UC buffer) */
6482 length = (be32_to_cpu(sdt.entry[0].end_offset) - 7281 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
6483 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; 7282 length = be32_to_cpu(sdt.entry[0].end_token);
7283 else
7284 length = (be32_to_cpu(sdt.entry[0].end_token) -
7285 be32_to_cpu(sdt.entry[0].start_token)) &
7286 IPR_FMT2_MBX_ADDR_MASK;
6484 7287
6485 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 7288 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
6486 struct ipr_hostrcb, queue); 7289 struct ipr_hostrcb, queue);
@@ -6488,13 +7291,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
6488 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 7291 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
6489 7292
6490 rc = ipr_get_ldump_data_section(ioa_cfg, 7293 rc = ipr_get_ldump_data_section(ioa_cfg,
6491 be32_to_cpu(sdt.entry[0].bar_str_offset), 7294 be32_to_cpu(sdt.entry[0].start_token),
6492 (__be32 *)&hostrcb->hcam, 7295 (__be32 *)&hostrcb->hcam,
6493 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); 7296 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
6494 7297
6495 if (!rc) { 7298 if (!rc) {
6496 ipr_handle_log_data(ioa_cfg, hostrcb); 7299 ipr_handle_log_data(ioa_cfg, hostrcb);
6497 ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); 7300 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
6498 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && 7301 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
6499 ioa_cfg->sdt_state == GET_DUMP) 7302 ioa_cfg->sdt_state == GET_DUMP)
6500 ioa_cfg->sdt_state = WAIT_FOR_DUMP; 7303 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
@@ -6722,7 +7525,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
6722 7525
6723 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { 7526 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
6724 ipr_mask_and_clear_interrupts(ioa_cfg, ~0); 7527 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
6725 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); 7528 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
6726 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; 7529 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
6727 } else { 7530 } else {
6728 ipr_cmd->job_step = ioa_cfg->reset; 7531 ipr_cmd->job_step = ioa_cfg->reset;
@@ -6785,7 +7588,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
6785 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; 7588 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
6786 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; 7589 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
6787 7590
6788 ipr_build_ucode_ioadl(ipr_cmd, sglist); 7591 if (ioa_cfg->sis64)
7592 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7593 else
7594 ipr_build_ucode_ioadl(ipr_cmd, sglist);
6789 ipr_cmd->job_step = ipr_reset_ucode_download_done; 7595 ipr_cmd->job_step = ipr_reset_ucode_download_done;
6790 7596
6791 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, 7597 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
@@ -7154,8 +7960,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7154 ipr_free_cmd_blks(ioa_cfg); 7960 ipr_free_cmd_blks(ioa_cfg);
7155 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 7961 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7156 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 7962 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7157 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), 7963 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7158 ioa_cfg->cfg_table, 7964 ioa_cfg->u.cfg_table,
7159 ioa_cfg->cfg_table_dma); 7965 ioa_cfg->cfg_table_dma);
7160 7966
7161 for (i = 0; i < IPR_NUM_HCAMS; i++) { 7967 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7209,7 +8015,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7209 int i; 8015 int i;
7210 8016
7211 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, 8017 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7212 sizeof(struct ipr_cmnd), 8, 0); 8018 sizeof(struct ipr_cmnd), 16, 0);
7213 8019
7214 if (!ioa_cfg->ipr_cmd_pool) 8020 if (!ioa_cfg->ipr_cmd_pool)
7215 return -ENOMEM; 8021 return -ENOMEM;
@@ -7227,13 +8033,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7227 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; 8033 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
7228 8034
7229 ioarcb = &ipr_cmd->ioarcb; 8035 ioarcb = &ipr_cmd->ioarcb;
7230 ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); 8036 ipr_cmd->dma_addr = dma_addr;
8037 if (ioa_cfg->sis64)
8038 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8039 else
8040 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8041
7231 ioarcb->host_response_handle = cpu_to_be32(i << 2); 8042 ioarcb->host_response_handle = cpu_to_be32(i << 2);
7232 ioarcb->write_ioadl_addr = 8043 if (ioa_cfg->sis64) {
7233 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); 8044 ioarcb->u.sis64_addr_data.data_ioadl_addr =
7234 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8045 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
7235 ioarcb->ioasa_host_pci_addr = 8046 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
7236 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8047 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8048 } else {
8049 ioarcb->write_ioadl_addr =
8050 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8051 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8052 ioarcb->ioasa_host_pci_addr =
8053 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8054 }
7237 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8055 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
7238 ipr_cmd->cmd_index = i; 8056 ipr_cmd->cmd_index = i;
7239 ipr_cmd->ioa_cfg = ioa_cfg; 8057 ipr_cmd->ioa_cfg = ioa_cfg;
@@ -7260,13 +8078,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7260 8078
7261 ENTER; 8079 ENTER;
7262 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * 8080 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
7263 IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); 8081 ioa_cfg->max_devs_supported, GFP_KERNEL);
7264 8082
7265 if (!ioa_cfg->res_entries) 8083 if (!ioa_cfg->res_entries)
7266 goto out; 8084 goto out;
7267 8085
7268 for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) 8086 if (ioa_cfg->sis64) {
8087 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8088 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8089 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8090 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8091 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8092 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8093 }
8094
8095 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
7269 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); 8096 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8097 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8098 }
7270 8099
7271 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, 8100 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
7272 sizeof(struct ipr_misc_cbs), 8101 sizeof(struct ipr_misc_cbs),
@@ -7285,11 +8114,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
7285 if (!ioa_cfg->host_rrq) 8114 if (!ioa_cfg->host_rrq)
7286 goto out_ipr_free_cmd_blocks; 8115 goto out_ipr_free_cmd_blocks;
7287 8116
7288 ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, 8117 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
7289 sizeof(struct ipr_config_table), 8118 ioa_cfg->cfg_table_size,
7290 &ioa_cfg->cfg_table_dma); 8119 &ioa_cfg->cfg_table_dma);
7291 8120
7292 if (!ioa_cfg->cfg_table) 8121 if (!ioa_cfg->u.cfg_table)
7293 goto out_free_host_rrq; 8122 goto out_free_host_rrq;
7294 8123
7295 for (i = 0; i < IPR_NUM_HCAMS; i++) { 8124 for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7323,8 +8152,9 @@ out_free_hostrcb_dma:
7323 ioa_cfg->hostrcb[i], 8152 ioa_cfg->hostrcb[i],
7324 ioa_cfg->hostrcb_dma[i]); 8153 ioa_cfg->hostrcb_dma[i]);
7325 } 8154 }
7326 pci_free_consistent(pdev, sizeof(struct ipr_config_table), 8155 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
7327 ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); 8156 ioa_cfg->u.cfg_table,
8157 ioa_cfg->cfg_table_dma);
7328out_free_host_rrq: 8158out_free_host_rrq:
7329 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, 8159 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7330 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); 8160 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7399,15 +8229,21 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7399 init_waitqueue_head(&ioa_cfg->reset_wait_q); 8229 init_waitqueue_head(&ioa_cfg->reset_wait_q);
7400 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8230 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7401 ioa_cfg->sdt_state = INACTIVE; 8231 ioa_cfg->sdt_state = INACTIVE;
7402 if (ipr_enable_cache)
7403 ioa_cfg->cache_state = CACHE_ENABLED;
7404 else
7405 ioa_cfg->cache_state = CACHE_DISABLED;
7406 8232
7407 ipr_initialize_bus_attr(ioa_cfg); 8233 ipr_initialize_bus_attr(ioa_cfg);
8234 ioa_cfg->max_devs_supported = ipr_max_devs;
7408 8235
7409 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; 8236 if (ioa_cfg->sis64) {
7410 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; 8237 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8238 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8239 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8240 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8241 } else {
8242 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8243 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8244 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8245 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8246 }
7411 host->max_channel = IPR_MAX_BUS_TO_SCAN; 8247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
7412 host->unique_id = host->host_no; 8248 host->unique_id = host->host_no;
7413 host->max_cmd_len = IPR_MAX_CDB_LEN; 8249 host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7419,13 +8255,26 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
7419 8255
7420 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; 8256 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
7421 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; 8257 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8258 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
7422 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; 8259 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8260 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
7423 t->clr_interrupt_reg = base + p->clr_interrupt_reg; 8261 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8262 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
7424 t->sense_interrupt_reg = base + p->sense_interrupt_reg; 8263 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8264 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
7425 t->ioarrin_reg = base + p->ioarrin_reg; 8265 t->ioarrin_reg = base + p->ioarrin_reg;
7426 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; 8266 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8267 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
7427 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; 8268 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8269 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
7428 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; 8270 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8271 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8272
8273 if (ioa_cfg->sis64) {
8274 t->init_feedback_reg = base + p->init_feedback_reg;
8275 t->dump_addr_reg = base + p->dump_addr_reg;
8276 t->dump_data_reg = base + p->dump_data_reg;
8277 }
7429} 8278}
7430 8279
7431/** 8280/**
@@ -7497,7 +8346,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7497 init_waitqueue_head(&ioa_cfg->msi_wait_q); 8346 init_waitqueue_head(&ioa_cfg->msi_wait_q);
7498 ioa_cfg->msi_received = 0; 8347 ioa_cfg->msi_received = 0;
7499 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8348 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
7500 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); 8349 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
7501 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8350 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 8351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7503 8352
@@ -7508,7 +8357,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
7508 } else if (ipr_debug) 8357 } else if (ipr_debug)
7509 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); 8358 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
7510 8359
7511 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); 8360 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
7512 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 8361 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7513 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); 8362 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
7514 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); 8363 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
@@ -7578,6 +8427,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7578 goto out_scsi_host_put; 8427 goto out_scsi_host_put;
7579 } 8428 }
7580 8429
8430 /* set SIS 32 or SIS 64 */
8431 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
7581 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 8432 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7582 8433
7583 if (ipr_transop_timeout) 8434 if (ipr_transop_timeout)
@@ -7615,7 +8466,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7615 8466
7616 pci_set_master(pdev); 8467 pci_set_master(pdev);
7617 8468
7618 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 8469 if (ioa_cfg->sis64) {
8470 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8471 if (rc < 0) {
8472 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8473 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8474 }
8475
8476 } else
8477 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8478
7619 if (rc < 0) { 8479 if (rc < 0) {
7620 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); 8480 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
7621 goto cleanup_nomem; 8481 goto cleanup_nomem;
@@ -7657,6 +8517,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7657 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) 8517 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
7658 goto cleanup_nomem; 8518 goto cleanup_nomem;
7659 8519
8520 if (ioa_cfg->sis64)
8521 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8522 + ((sizeof(struct ipr_config_table_entry64)
8523 * ioa_cfg->max_devs_supported)));
8524 else
8525 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8526 + ((sizeof(struct ipr_config_table_entry)
8527 * ioa_cfg->max_devs_supported)));
8528
7660 rc = ipr_alloc_mem(ioa_cfg); 8529 rc = ipr_alloc_mem(ioa_cfg);
7661 if (rc < 0) { 8530 if (rc < 0) {
7662 dev_err(&pdev->dev, 8531 dev_err(&pdev->dev,
@@ -7668,9 +8537,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
7668 * If HRRQ updated interrupt is not masked, or reset alert is set, 8537 * If HRRQ updated interrupt is not masked, or reset alert is set,
7669 * the card is in an unknown state and needs a hard reset 8538 * the card is in an unknown state and needs a hard reset
7670 */ 8539 */
7671 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 8540 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
7672 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); 8541 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
7673 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); 8542 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
7674 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) 8543 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
7675 ioa_cfg->needs_hard_reset = 1; 8544 ioa_cfg->needs_hard_reset = 1;
7676 if (interrupts & IPR_PCII_ERROR_INTERRUPTS) 8545 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
@@ -7958,9 +8827,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7958 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, 8827 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
7959 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8828 IPR_USE_LONG_TRANSOP_TIMEOUT },
7960 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8829 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7961 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
7962 IPR_USE_LONG_TRANSOP_TIMEOUT },
7963 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7964 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, 8830 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
7965 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, 8831 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
7966 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 8832 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
@@ -7975,9 +8841,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
7975 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, 8841 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
7976 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 8842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
7977 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8843 IPR_USE_LONG_TRANSOP_TIMEOUT },
7978 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E, 8844 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
7979 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 8845 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
7980 IPR_USE_LONG_TRANSOP_TIMEOUT }, 8846 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8847 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8848 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8849 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8850 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8851 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8852 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8853 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8854 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8855 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8856 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8857 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8858 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8859 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
7981 { } 8860 { }
7982}; 8861};
7983MODULE_DEVICE_TABLE(pci, ipr_pci_table); 8862MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -7997,6 +8876,61 @@ static struct pci_driver ipr_driver = {
7997}; 8876};
7998 8877
7999/** 8878/**
8879 * ipr_halt_done - Shutdown prepare completion
8880 *
8881 * Return value:
8882 * none
8883 **/
8884static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8885{
8886 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8887
8888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8889}
8890
8891/**
8892 * ipr_halt - Issue shutdown prepare to all adapters
8893 *
8894 * Return value:
8895 * NOTIFY_OK on success / NOTIFY_DONE on failure
8896 **/
8897static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8898{
8899 struct ipr_cmnd *ipr_cmd;
8900 struct ipr_ioa_cfg *ioa_cfg;
8901 unsigned long flags = 0;
8902
8903 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8904 return NOTIFY_DONE;
8905
8906 spin_lock(&ipr_driver_lock);
8907
8908 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8909 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8910 if (!ioa_cfg->allow_cmds) {
8911 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8912 continue;
8913 }
8914
8915 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8916 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8917 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8918 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8919 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8920
8921 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8923 }
8924 spin_unlock(&ipr_driver_lock);
8925
8926 return NOTIFY_OK;
8927}
8928
8929static struct notifier_block ipr_notifier = {
8930 ipr_halt, NULL, 0
8931};
8932
8933/**
8000 * ipr_init - Module entry point 8934 * ipr_init - Module entry point
8001 * 8935 *
8002 * Return value: 8936 * Return value:
@@ -8007,6 +8941,7 @@ static int __init ipr_init(void)
8007 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", 8941 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8008 IPR_DRIVER_VERSION, IPR_DRIVER_DATE); 8942 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8009 8943
8944 register_reboot_notifier(&ipr_notifier);
8010 return pci_register_driver(&ipr_driver); 8945 return pci_register_driver(&ipr_driver);
8011} 8946}
8012 8947
@@ -8020,6 +8955,7 @@ static int __init ipr_init(void)
8020 **/ 8955 **/
8021static void __exit ipr_exit(void) 8956static void __exit ipr_exit(void)
8022{ 8957{
8958 unregister_reboot_notifier(&ipr_notifier);
8023 pci_unregister_driver(&ipr_driver); 8959 pci_unregister_driver(&ipr_driver);
8024} 8960}
8025 8961
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 19bbcf39f0c9..4c267b5e0b96 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
37/* 37/*
38 * Literals 38 * Literals
39 */ 39 */
40#define IPR_DRIVER_VERSION "2.4.3" 40#define IPR_DRIVER_VERSION "2.5.0"
41#define IPR_DRIVER_DATE "(June 10, 2009)" 41#define IPR_DRIVER_DATE "(February 11, 2010)"
42 42
43/* 43/*
44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 44 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,7 +55,9 @@
55#define IPR_NUM_BASE_CMD_BLKS 100 55#define IPR_NUM_BASE_CMD_BLKS 100
56 56
57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 57#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
58#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A 58
59#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
60#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A
59 61
60#define IPR_SUBS_DEV_ID_2780 0x0264 62#define IPR_SUBS_DEV_ID_2780 0x0264
61#define IPR_SUBS_DEV_ID_5702 0x0266 63#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -70,15 +72,24 @@
70#define IPR_SUBS_DEV_ID_572A 0x02C1 72#define IPR_SUBS_DEV_ID_572A 0x02C1
71#define IPR_SUBS_DEV_ID_572B 0x02C2 73#define IPR_SUBS_DEV_ID_572B 0x02C2
72#define IPR_SUBS_DEV_ID_572F 0x02C3 74#define IPR_SUBS_DEV_ID_572F 0x02C3
73#define IPR_SUBS_DEV_ID_574D 0x030B
74#define IPR_SUBS_DEV_ID_574E 0x030A 75#define IPR_SUBS_DEV_ID_574E 0x030A
75#define IPR_SUBS_DEV_ID_575B 0x030D 76#define IPR_SUBS_DEV_ID_575B 0x030D
76#define IPR_SUBS_DEV_ID_575C 0x0338 77#define IPR_SUBS_DEV_ID_575C 0x0338
77#define IPR_SUBS_DEV_ID_575D 0x033E
78#define IPR_SUBS_DEV_ID_57B3 0x033A 78#define IPR_SUBS_DEV_ID_57B3 0x033A
79#define IPR_SUBS_DEV_ID_57B7 0x0360 79#define IPR_SUBS_DEV_ID_57B7 0x0360
80#define IPR_SUBS_DEV_ID_57B8 0x02C2 80#define IPR_SUBS_DEV_ID_57B8 0x02C2
81 81
82#define IPR_SUBS_DEV_ID_57B4 0x033B
83#define IPR_SUBS_DEV_ID_57B2 0x035F
84#define IPR_SUBS_DEV_ID_57C6 0x0357
85
86#define IPR_SUBS_DEV_ID_57B5 0x033C
87#define IPR_SUBS_DEV_ID_57CE 0x035E
88#define IPR_SUBS_DEV_ID_57B1 0x0355
89
90#define IPR_SUBS_DEV_ID_574D 0x0356
91#define IPR_SUBS_DEV_ID_575D 0x035D
92
82#define IPR_NAME "ipr" 93#define IPR_NAME "ipr"
83 94
84/* 95/*
@@ -118,6 +129,10 @@
118#define IPR_NUM_LOG_HCAMS 2 129#define IPR_NUM_LOG_HCAMS 2
119#define IPR_NUM_CFG_CHG_HCAMS 2 130#define IPR_NUM_CFG_CHG_HCAMS 2
120#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 131#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
132
133#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
134#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
135
121#define IPR_MAX_NUM_TARGETS_PER_BUS 256 136#define IPR_MAX_NUM_TARGETS_PER_BUS 256
122#define IPR_MAX_NUM_LUNS_PER_TARGET 256 137#define IPR_MAX_NUM_LUNS_PER_TARGET 256
123#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 138#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
@@ -132,13 +147,15 @@
132 147
133/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ 148/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
134#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ 149#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
135 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) 150 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
136 151
137#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS 152#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS
138#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ 153#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
139 IPR_NUM_INTERNAL_CMD_BLKS) 154 IPR_NUM_INTERNAL_CMD_BLKS)
140 155
141#define IPR_MAX_PHYSICAL_DEVS 192 156#define IPR_MAX_PHYSICAL_DEVS 192
157#define IPR_DEFAULT_SIS64_DEVS 1024
158#define IPR_MAX_SIS64_DEVS 4096
142 159
143#define IPR_MAX_SGLIST 64 160#define IPR_MAX_SGLIST 64
144#define IPR_IOA_MAX_SECTORS 32767 161#define IPR_IOA_MAX_SECTORS 32767
@@ -173,6 +190,7 @@
173#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 190#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01
174#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 191#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02
175#define IPR_SET_SUPPORTED_DEVICES 0xFB 192#define IPR_SET_SUPPORTED_DEVICES 0xFB
193#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80
176#define IPR_IOA_SHUTDOWN 0xF7 194#define IPR_IOA_SHUTDOWN 0xF7
177#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 195#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05
178 196
@@ -221,9 +239,17 @@
221#define IPR_SDT_FMT2_BAR5_SEL 0x5 239#define IPR_SDT_FMT2_BAR5_SEL 0x5
222#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 240#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8
223#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 241#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2
242#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3
224#define IPR_DOORBELL 0x82800000 243#define IPR_DOORBELL 0x82800000
225#define IPR_RUNTIME_RESET 0x40000000 244#define IPR_RUNTIME_RESET 0x40000000
226 245
246#define IPR_IPL_INIT_MIN_STAGE_TIME 5
247#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
248#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
249#define IPR_IPL_INIT_STAGE_MASK 0xff000000
250#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff
251#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0)
252
227#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) 253#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0)
228#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) 254#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3)
229#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) 255#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4)
@@ -318,27 +344,27 @@ struct ipr_std_inq_data {
318 u8 serial_num[IPR_SERIAL_NUM_LEN]; 344 u8 serial_num[IPR_SERIAL_NUM_LEN];
319}__attribute__ ((packed)); 345}__attribute__ ((packed));
320 346
347#define IPR_RES_TYPE_AF_DASD 0x00
348#define IPR_RES_TYPE_GENERIC_SCSI 0x01
349#define IPR_RES_TYPE_VOLUME_SET 0x02
350#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03
351#define IPR_RES_TYPE_GENERIC_ATA 0x04
352#define IPR_RES_TYPE_ARRAY 0x05
353#define IPR_RES_TYPE_IOAFP 0xff
354
321struct ipr_config_table_entry { 355struct ipr_config_table_entry {
322 u8 proto; 356 u8 proto;
323#define IPR_PROTO_SATA 0x02 357#define IPR_PROTO_SATA 0x02
324#define IPR_PROTO_SATA_ATAPI 0x03 358#define IPR_PROTO_SATA_ATAPI 0x03
325#define IPR_PROTO_SAS_STP 0x06 359#define IPR_PROTO_SAS_STP 0x06
326#define IPR_PROTO_SAS_STP_ATAPI 0x07 360#define IPR_PROTO_SAS_STP_ATAPI 0x07
327 u8 array_id; 361 u8 array_id;
328 u8 flags; 362 u8 flags;
329#define IPR_IS_IOA_RESOURCE 0x80 363#define IPR_IS_IOA_RESOURCE 0x80
330#define IPR_IS_ARRAY_MEMBER 0x20
331#define IPR_IS_HOT_SPARE 0x10
332
333 u8 rsvd_subtype; 364 u8 rsvd_subtype;
334#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) 365
335#define IPR_SUBTYPE_AF_DASD 0 366#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4)
336#define IPR_SUBTYPE_GENERIC_SCSI 1 367#define IPR_QUEUE_FROZEN_MODEL 0
337#define IPR_SUBTYPE_VOLUME_SET 2
338#define IPR_SUBTYPE_GENERIC_ATA 4
339
340#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
341#define IPR_QUEUE_FROZEN_MODEL 0
342#define IPR_QUEUE_NACA_MODEL 1 368#define IPR_QUEUE_NACA_MODEL 1
343 369
344 struct ipr_res_addr res_addr; 370 struct ipr_res_addr res_addr;
@@ -347,6 +373,28 @@ struct ipr_config_table_entry {
347 struct ipr_std_inq_data std_inq_data; 373 struct ipr_std_inq_data std_inq_data;
348}__attribute__ ((packed, aligned (4))); 374}__attribute__ ((packed, aligned (4)));
349 375
376struct ipr_config_table_entry64 {
377 u8 res_type;
378 u8 proto;
379 u8 vset_num;
380 u8 array_id;
381 __be16 flags;
382 __be16 res_flags;
383#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12)
384 __be32 res_handle;
385 u8 dev_id_type;
386 u8 reserved[3];
387 __be64 dev_id;
388 __be64 lun;
389 __be64 lun_wwn[2];
390#define IPR_MAX_RES_PATH_LENGTH 24
391 __be64 res_path;
392 struct ipr_std_inq_data std_inq_data;
393 u8 reserved2[4];
394 __be64 reserved3[2]; // description text
395 u8 reserved4[8];
396}__attribute__ ((packed, aligned (8)));
397
350struct ipr_config_table_hdr { 398struct ipr_config_table_hdr {
351 u8 num_entries; 399 u8 num_entries;
352 u8 flags; 400 u8 flags;
@@ -354,13 +402,35 @@ struct ipr_config_table_hdr {
354 __be16 reserved; 402 __be16 reserved;
355}__attribute__((packed, aligned (4))); 403}__attribute__((packed, aligned (4)));
356 404
405struct ipr_config_table_hdr64 {
406 __be16 num_entries;
407 __be16 reserved;
408 u8 flags;
409 u8 reserved2[11];
410}__attribute__((packed, aligned (4)));
411
357struct ipr_config_table { 412struct ipr_config_table {
358 struct ipr_config_table_hdr hdr; 413 struct ipr_config_table_hdr hdr;
359 struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; 414 struct ipr_config_table_entry dev[0];
360}__attribute__((packed, aligned (4))); 415}__attribute__((packed, aligned (4)));
361 416
417struct ipr_config_table64 {
418 struct ipr_config_table_hdr64 hdr64;
419 struct ipr_config_table_entry64 dev[0];
420}__attribute__((packed, aligned (8)));
421
422struct ipr_config_table_entry_wrapper {
423 union {
424 struct ipr_config_table_entry *cfgte;
425 struct ipr_config_table_entry64 *cfgte64;
426 } u;
427};
428
362struct ipr_hostrcb_cfg_ch_not { 429struct ipr_hostrcb_cfg_ch_not {
363 struct ipr_config_table_entry cfgte; 430 union {
431 struct ipr_config_table_entry cfgte;
432 struct ipr_config_table_entry64 cfgte64;
433 } u;
364 u8 reserved[936]; 434 u8 reserved[936];
365}__attribute__((packed, aligned (4))); 435}__attribute__((packed, aligned (4)));
366 436
@@ -381,7 +451,7 @@ struct ipr_cmd_pkt {
381#define IPR_RQTYPE_HCAM 0x02 451#define IPR_RQTYPE_HCAM 0x02
382#define IPR_RQTYPE_ATA_PASSTHRU 0x04 452#define IPR_RQTYPE_ATA_PASSTHRU 0x04
383 453
384 u8 luntar_luntrn; 454 u8 reserved2;
385 455
386 u8 flags_hi; 456 u8 flags_hi;
387#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 457#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80
@@ -403,7 +473,7 @@ struct ipr_cmd_pkt {
403 __be16 timeout; 473 __be16 timeout;
404}__attribute__ ((packed, aligned(4))); 474}__attribute__ ((packed, aligned(4)));
405 475
406struct ipr_ioarcb_ata_regs { 476struct ipr_ioarcb_ata_regs { /* 22 bytes */
407 u8 flags; 477 u8 flags;
408#define IPR_ATA_FLAG_PACKET_CMD 0x80 478#define IPR_ATA_FLAG_PACKET_CMD 0x80
409#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 479#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
@@ -442,28 +512,49 @@ struct ipr_ioadl_desc {
442 __be32 address; 512 __be32 address;
443}__attribute__((packed, aligned (8))); 513}__attribute__((packed, aligned (8)));
444 514
515struct ipr_ioadl64_desc {
516 __be32 flags;
517 __be32 data_len;
518 __be64 address;
519}__attribute__((packed, aligned (16)));
520
521struct ipr_ata64_ioadl {
522 struct ipr_ioarcb_ata_regs regs;
523 u16 reserved[5];
524 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
525}__attribute__((packed, aligned (16)));
526
445struct ipr_ioarcb_add_data { 527struct ipr_ioarcb_add_data {
446 union { 528 union {
447 struct ipr_ioarcb_ata_regs regs; 529 struct ipr_ioarcb_ata_regs regs;
448 struct ipr_ioadl_desc ioadl[5]; 530 struct ipr_ioadl_desc ioadl[5];
449 __be32 add_cmd_parms[10]; 531 __be32 add_cmd_parms[10];
450 }u; 532 } u;
451}__attribute__ ((packed, aligned(4))); 533}__attribute__ ((packed, aligned (4)));
534
535struct ipr_ioarcb_sis64_add_addr_ecb {
536 __be64 ioasa_host_pci_addr;
537 __be64 data_ioadl_addr;
538 __be64 reserved;
539 __be32 ext_control_buf[4];
540}__attribute__((packed, aligned (8)));
452 541
453/* IOA Request Control Block 128 bytes */ 542/* IOA Request Control Block 128 bytes */
454struct ipr_ioarcb { 543struct ipr_ioarcb {
455 __be32 ioarcb_host_pci_addr; 544 union {
456 __be32 reserved; 545 __be32 ioarcb_host_pci_addr;
546 __be64 ioarcb_host_pci_addr64;
547 } a;
457 __be32 res_handle; 548 __be32 res_handle;
458 __be32 host_response_handle; 549 __be32 host_response_handle;
459 __be32 reserved1; 550 __be32 reserved1;
460 __be32 reserved2; 551 __be32 reserved2;
461 __be32 reserved3; 552 __be32 reserved3;
462 553
463 __be32 write_data_transfer_length; 554 __be32 data_transfer_length;
464 __be32 read_data_transfer_length; 555 __be32 read_data_transfer_length;
465 __be32 write_ioadl_addr; 556 __be32 write_ioadl_addr;
466 __be32 write_ioadl_len; 557 __be32 ioadl_len;
467 __be32 read_ioadl_addr; 558 __be32 read_ioadl_addr;
468 __be32 read_ioadl_len; 559 __be32 read_ioadl_len;
469 560
@@ -473,8 +564,14 @@ struct ipr_ioarcb {
473 564
474 struct ipr_cmd_pkt cmd_pkt; 565 struct ipr_cmd_pkt cmd_pkt;
475 566
476 __be32 add_cmd_parms_len; 567 __be16 add_cmd_parms_offset;
477 struct ipr_ioarcb_add_data add_data; 568 __be16 add_cmd_parms_len;
569
570 union {
571 struct ipr_ioarcb_add_data add_data;
572 struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data;
573 } u;
574
478}__attribute__((packed, aligned (4))); 575}__attribute__((packed, aligned (4)));
479 576
480struct ipr_ioasa_vset { 577struct ipr_ioasa_vset {
@@ -676,12 +773,29 @@ struct ipr_hostrcb_device_data_entry_enhanced {
676 struct ipr_ext_vpd cfc_last_with_dev_vpd; 773 struct ipr_ext_vpd cfc_last_with_dev_vpd;
677}__attribute__((packed, aligned (4))); 774}__attribute__((packed, aligned (4)));
678 775
776struct ipr_hostrcb64_device_data_entry_enhanced {
777 struct ipr_ext_vpd vpd;
778 u8 ccin[4];
779 u8 res_path[8];
780 struct ipr_ext_vpd new_vpd;
781 u8 new_ccin[4];
782 struct ipr_ext_vpd ioa_last_with_dev_vpd;
783 struct ipr_ext_vpd cfc_last_with_dev_vpd;
784}__attribute__((packed, aligned (4)));
785
679struct ipr_hostrcb_array_data_entry { 786struct ipr_hostrcb_array_data_entry {
680 struct ipr_vpd vpd; 787 struct ipr_vpd vpd;
681 struct ipr_res_addr expected_dev_res_addr; 788 struct ipr_res_addr expected_dev_res_addr;
682 struct ipr_res_addr dev_res_addr; 789 struct ipr_res_addr dev_res_addr;
683}__attribute__((packed, aligned (4))); 790}__attribute__((packed, aligned (4)));
684 791
792struct ipr_hostrcb64_array_data_entry {
793 struct ipr_ext_vpd vpd;
794 u8 ccin[4];
795 u8 expected_res_path[8];
796 u8 res_path[8];
797}__attribute__((packed, aligned (4)));
798
685struct ipr_hostrcb_array_data_entry_enhanced { 799struct ipr_hostrcb_array_data_entry_enhanced {
686 struct ipr_ext_vpd vpd; 800 struct ipr_ext_vpd vpd;
687 u8 ccin[4]; 801 u8 ccin[4];
@@ -733,6 +847,14 @@ struct ipr_hostrcb_type_13_error {
733 struct ipr_hostrcb_device_data_entry_enhanced dev[3]; 847 struct ipr_hostrcb_device_data_entry_enhanced dev[3];
734}__attribute__((packed, aligned (4))); 848}__attribute__((packed, aligned (4)));
735 849
850struct ipr_hostrcb_type_23_error {
851 struct ipr_ext_vpd ioa_vpd;
852 struct ipr_ext_vpd cfc_vpd;
853 __be32 errors_detected;
854 __be32 errors_logged;
855 struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
856}__attribute__((packed, aligned (4)));
857
736struct ipr_hostrcb_type_04_error { 858struct ipr_hostrcb_type_04_error {
737 struct ipr_vpd ioa_vpd; 859 struct ipr_vpd ioa_vpd;
738 struct ipr_vpd cfc_vpd; 860 struct ipr_vpd cfc_vpd;
@@ -760,6 +882,22 @@ struct ipr_hostrcb_type_14_error {
760 struct ipr_hostrcb_array_data_entry_enhanced array_member[18]; 882 struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
761}__attribute__((packed, aligned (4))); 883}__attribute__((packed, aligned (4)));
762 884
885struct ipr_hostrcb_type_24_error {
886 struct ipr_ext_vpd ioa_vpd;
887 struct ipr_ext_vpd cfc_vpd;
888 u8 reserved[2];
889 u8 exposed_mode_adn;
890#define IPR_INVALID_ARRAY_DEV_NUM 0xff
891 u8 array_id;
892 u8 last_res_path[8];
893 u8 protection_level[8];
894 struct ipr_ext_vpd array_vpd;
895 u8 description[16];
896 u8 reserved2[3];
897 u8 num_entries;
898 struct ipr_hostrcb64_array_data_entry array_member[32];
899}__attribute__((packed, aligned (4)));
900
763struct ipr_hostrcb_type_07_error { 901struct ipr_hostrcb_type_07_error {
764 u8 failure_reason[64]; 902 u8 failure_reason[64];
765 struct ipr_vpd vpd; 903 struct ipr_vpd vpd;
@@ -797,6 +935,22 @@ struct ipr_hostrcb_config_element {
797 __be32 wwid[2]; 935 __be32 wwid[2];
798}__attribute__((packed, aligned (4))); 936}__attribute__((packed, aligned (4)));
799 937
938struct ipr_hostrcb64_config_element {
939 __be16 length;
940 u8 descriptor_id;
941#define IPR_DESCRIPTOR_MASK 0xC0
942#define IPR_DESCRIPTOR_SIS64 0x00
943
944 u8 reserved;
945 u8 type_status;
946
947 u8 reserved2[2];
948 u8 link_rate;
949
950 u8 res_path[8];
951 __be32 wwid[2];
952}__attribute__((packed, aligned (8)));
953
800struct ipr_hostrcb_fabric_desc { 954struct ipr_hostrcb_fabric_desc {
801 __be16 length; 955 __be16 length;
802 u8 ioa_port; 956 u8 ioa_port;
@@ -818,6 +972,20 @@ struct ipr_hostrcb_fabric_desc {
818 struct ipr_hostrcb_config_element elem[1]; 972 struct ipr_hostrcb_config_element elem[1];
819}__attribute__((packed, aligned (4))); 973}__attribute__((packed, aligned (4)));
820 974
975struct ipr_hostrcb64_fabric_desc {
976 __be16 length;
977 u8 descriptor_id;
978
979 u8 reserved;
980 u8 path_state;
981
982 u8 reserved2[2];
983 u8 res_path[8];
984 u8 reserved3[6];
985 __be16 num_entries;
986 struct ipr_hostrcb64_config_element elem[1];
987}__attribute__((packed, aligned (8)));
988
821#define for_each_fabric_cfg(fabric, cfg) \ 989#define for_each_fabric_cfg(fabric, cfg) \
822 for (cfg = (fabric)->elem; \ 990 for (cfg = (fabric)->elem; \
823 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \ 991 cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -830,10 +998,17 @@ struct ipr_hostrcb_type_20_error {
830 struct ipr_hostrcb_fabric_desc desc[1]; 998 struct ipr_hostrcb_fabric_desc desc[1];
831}__attribute__((packed, aligned (4))); 999}__attribute__((packed, aligned (4)));
832 1000
1001struct ipr_hostrcb_type_30_error {
1002 u8 failure_reason[64];
1003 u8 reserved[3];
1004 u8 num_entries;
1005 struct ipr_hostrcb64_fabric_desc desc[1];
1006}__attribute__((packed, aligned (4)));
1007
833struct ipr_hostrcb_error { 1008struct ipr_hostrcb_error {
834 __be32 failing_dev_ioasc; 1009 __be32 fd_ioasc;
835 struct ipr_res_addr failing_dev_res_addr; 1010 struct ipr_res_addr fd_res_addr;
836 __be32 failing_dev_res_handle; 1011 __be32 fd_res_handle;
837 __be32 prc; 1012 __be32 prc;
838 union { 1013 union {
839 struct ipr_hostrcb_type_ff_error type_ff_error; 1014 struct ipr_hostrcb_type_ff_error type_ff_error;
@@ -850,6 +1025,26 @@ struct ipr_hostrcb_error {
850 } u; 1025 } u;
851}__attribute__((packed, aligned (4))); 1026}__attribute__((packed, aligned (4)));
852 1027
1028struct ipr_hostrcb64_error {
1029 __be32 fd_ioasc;
1030 __be32 ioa_fw_level;
1031 __be32 fd_res_handle;
1032 __be32 prc;
1033 __be64 fd_dev_id;
1034 __be64 fd_lun;
1035 u8 fd_res_path[8];
1036 __be64 time_stamp;
1037 u8 reserved[2];
1038 union {
1039 struct ipr_hostrcb_type_ff_error type_ff_error;
1040 struct ipr_hostrcb_type_12_error type_12_error;
1041 struct ipr_hostrcb_type_17_error type_17_error;
1042 struct ipr_hostrcb_type_23_error type_23_error;
1043 struct ipr_hostrcb_type_24_error type_24_error;
1044 struct ipr_hostrcb_type_30_error type_30_error;
1045 } u;
1046}__attribute__((packed, aligned (8)));
1047
853struct ipr_hostrcb_raw { 1048struct ipr_hostrcb_raw {
854 __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)]; 1049 __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
855}__attribute__((packed, aligned (4))); 1050}__attribute__((packed, aligned (4)));
@@ -887,7 +1082,11 @@ struct ipr_hcam {
887#define IPR_HOST_RCB_OVERLAY_ID_16 0x16 1082#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
888#define IPR_HOST_RCB_OVERLAY_ID_17 0x17 1083#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
889#define IPR_HOST_RCB_OVERLAY_ID_20 0x20 1084#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
890#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF 1085#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
1086#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
1087#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
1088#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
1089#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
891 1090
892 u8 reserved1[3]; 1091 u8 reserved1[3];
893 __be32 ilid; 1092 __be32 ilid;
@@ -897,6 +1096,7 @@ struct ipr_hcam {
897 1096
898 union { 1097 union {
899 struct ipr_hostrcb_error error; 1098 struct ipr_hostrcb_error error;
1099 struct ipr_hostrcb64_error error64;
900 struct ipr_hostrcb_cfg_ch_not ccn; 1100 struct ipr_hostrcb_cfg_ch_not ccn;
901 struct ipr_hostrcb_raw raw; 1101 struct ipr_hostrcb_raw raw;
902 } u; 1102 } u;
@@ -907,14 +1107,14 @@ struct ipr_hostrcb {
907 dma_addr_t hostrcb_dma; 1107 dma_addr_t hostrcb_dma;
908 struct list_head queue; 1108 struct list_head queue;
909 struct ipr_ioa_cfg *ioa_cfg; 1109 struct ipr_ioa_cfg *ioa_cfg;
1110 char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
910}; 1111};
911 1112
912/* IPR smart dump table structures */ 1113/* IPR smart dump table structures */
913struct ipr_sdt_entry { 1114struct ipr_sdt_entry {
914 __be32 bar_str_offset; 1115 __be32 start_token;
915 __be32 end_offset; 1116 __be32 end_token;
916 u8 entry_byte; 1117 u8 reserved[4];
917 u8 reserved[3];
918 1118
919 u8 flags; 1119 u8 flags;
920#define IPR_SDT_ENDIAN 0x80 1120#define IPR_SDT_ENDIAN 0x80
@@ -960,28 +1160,48 @@ struct ipr_sata_port {
960}; 1160};
961 1161
962struct ipr_resource_entry { 1162struct ipr_resource_entry {
963 struct ipr_config_table_entry cfgte;
964 u8 needs_sync_complete:1; 1163 u8 needs_sync_complete:1;
965 u8 in_erp:1; 1164 u8 in_erp:1;
966 u8 add_to_ml:1; 1165 u8 add_to_ml:1;
967 u8 del_from_ml:1; 1166 u8 del_from_ml:1;
968 u8 resetting_device:1; 1167 u8 resetting_device:1;
969 1168
1169 u32 bus; /* AKA channel */
1170 u32 target; /* AKA id */
1171 u32 lun;
1172#define IPR_ARRAY_VIRTUAL_BUS 0x1
1173#define IPR_VSET_VIRTUAL_BUS 0x2
1174#define IPR_IOAFP_VIRTUAL_BUS 0x3
1175
1176#define IPR_GET_RES_PHYS_LOC(res) \
1177 (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
1178
1179 u8 ata_class;
1180
1181 u8 flags;
1182 __be16 res_flags;
1183
1184 __be32 type;
1185
1186 u8 qmodel;
1187 struct ipr_std_inq_data std_inq_data;
1188
1189 __be32 res_handle;
1190 __be64 dev_id;
1191 struct scsi_lun dev_lun;
1192 u8 res_path[8];
1193
1194 struct ipr_ioa_cfg *ioa_cfg;
970 struct scsi_device *sdev; 1195 struct scsi_device *sdev;
971 struct ipr_sata_port *sata_port; 1196 struct ipr_sata_port *sata_port;
972 struct list_head queue; 1197 struct list_head queue;
973}; 1198}; /* struct ipr_resource_entry */
974 1199
975struct ipr_resource_hdr { 1200struct ipr_resource_hdr {
976 u16 num_entries; 1201 u16 num_entries;
977 u16 reserved; 1202 u16 reserved;
978}; 1203};
979 1204
980struct ipr_resource_table {
981 struct ipr_resource_hdr hdr;
982 struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS];
983};
984
985struct ipr_misc_cbs { 1205struct ipr_misc_cbs {
986 struct ipr_ioa_vpd ioa_vpd; 1206 struct ipr_ioa_vpd ioa_vpd;
987 struct ipr_inquiry_page0 page0_data; 1207 struct ipr_inquiry_page0 page0_data;
@@ -994,27 +1214,51 @@ struct ipr_misc_cbs {
994struct ipr_interrupt_offsets { 1214struct ipr_interrupt_offsets {
995 unsigned long set_interrupt_mask_reg; 1215 unsigned long set_interrupt_mask_reg;
996 unsigned long clr_interrupt_mask_reg; 1216 unsigned long clr_interrupt_mask_reg;
1217 unsigned long clr_interrupt_mask_reg32;
997 unsigned long sense_interrupt_mask_reg; 1218 unsigned long sense_interrupt_mask_reg;
1219 unsigned long sense_interrupt_mask_reg32;
998 unsigned long clr_interrupt_reg; 1220 unsigned long clr_interrupt_reg;
1221 unsigned long clr_interrupt_reg32;
999 1222
1000 unsigned long sense_interrupt_reg; 1223 unsigned long sense_interrupt_reg;
1224 unsigned long sense_interrupt_reg32;
1001 unsigned long ioarrin_reg; 1225 unsigned long ioarrin_reg;
1002 unsigned long sense_uproc_interrupt_reg; 1226 unsigned long sense_uproc_interrupt_reg;
1227 unsigned long sense_uproc_interrupt_reg32;
1003 unsigned long set_uproc_interrupt_reg; 1228 unsigned long set_uproc_interrupt_reg;
1229 unsigned long set_uproc_interrupt_reg32;
1004 unsigned long clr_uproc_interrupt_reg; 1230 unsigned long clr_uproc_interrupt_reg;
1231 unsigned long clr_uproc_interrupt_reg32;
1232
1233 unsigned long init_feedback_reg;
1234
1235 unsigned long dump_addr_reg;
1236 unsigned long dump_data_reg;
1005}; 1237};
1006 1238
1007struct ipr_interrupts { 1239struct ipr_interrupts {
1008 void __iomem *set_interrupt_mask_reg; 1240 void __iomem *set_interrupt_mask_reg;
1009 void __iomem *clr_interrupt_mask_reg; 1241 void __iomem *clr_interrupt_mask_reg;
1242 void __iomem *clr_interrupt_mask_reg32;
1010 void __iomem *sense_interrupt_mask_reg; 1243 void __iomem *sense_interrupt_mask_reg;
1244 void __iomem *sense_interrupt_mask_reg32;
1011 void __iomem *clr_interrupt_reg; 1245 void __iomem *clr_interrupt_reg;
1246 void __iomem *clr_interrupt_reg32;
1012 1247
1013 void __iomem *sense_interrupt_reg; 1248 void __iomem *sense_interrupt_reg;
1249 void __iomem *sense_interrupt_reg32;
1014 void __iomem *ioarrin_reg; 1250 void __iomem *ioarrin_reg;
1015 void __iomem *sense_uproc_interrupt_reg; 1251 void __iomem *sense_uproc_interrupt_reg;
1252 void __iomem *sense_uproc_interrupt_reg32;
1016 void __iomem *set_uproc_interrupt_reg; 1253 void __iomem *set_uproc_interrupt_reg;
1254 void __iomem *set_uproc_interrupt_reg32;
1017 void __iomem *clr_uproc_interrupt_reg; 1255 void __iomem *clr_uproc_interrupt_reg;
1256 void __iomem *clr_uproc_interrupt_reg32;
1257
1258 void __iomem *init_feedback_reg;
1259
1260 void __iomem *dump_addr_reg;
1261 void __iomem *dump_data_reg;
1018}; 1262};
1019 1263
1020struct ipr_chip_cfg_t { 1264struct ipr_chip_cfg_t {
@@ -1029,6 +1273,9 @@ struct ipr_chip_t {
1029 u16 intr_type; 1273 u16 intr_type;
1030#define IPR_USE_LSI 0x00 1274#define IPR_USE_LSI 0x00
1031#define IPR_USE_MSI 0x01 1275#define IPR_USE_MSI 0x01
1276 u16 sis_type;
1277#define IPR_SIS32 0x00
1278#define IPR_SIS64 0x01
1032 const struct ipr_chip_cfg_t *cfg; 1279 const struct ipr_chip_cfg_t *cfg;
1033}; 1280};
1034 1281
@@ -1073,13 +1320,6 @@ enum ipr_sdt_state {
1073 DUMP_OBTAINED 1320 DUMP_OBTAINED
1074}; 1321};
1075 1322
1076enum ipr_cache_state {
1077 CACHE_NONE,
1078 CACHE_DISABLED,
1079 CACHE_ENABLED,
1080 CACHE_INVALID
1081};
1082
1083/* Per-controller data */ 1323/* Per-controller data */
1084struct ipr_ioa_cfg { 1324struct ipr_ioa_cfg {
1085 char eye_catcher[8]; 1325 char eye_catcher[8];
@@ -1099,10 +1339,17 @@ struct ipr_ioa_cfg {
1099 u8 dual_raid:1; 1339 u8 dual_raid:1;
1100 u8 needs_warm_reset:1; 1340 u8 needs_warm_reset:1;
1101 u8 msi_received:1; 1341 u8 msi_received:1;
1342 u8 sis64:1;
1102 1343
1103 u8 revid; 1344 u8 revid;
1104 1345
1105 enum ipr_cache_state cache_state; 1346 /*
1347 * Bitmaps for SIS64 generated target values
1348 */
1349 unsigned long *target_ids;
1350 unsigned long *array_ids;
1351 unsigned long *vset_ids;
1352
1106 u16 type; /* CCIN of the card */ 1353 u16 type; /* CCIN of the card */
1107 1354
1108 u8 log_level; 1355 u8 log_level;
@@ -1133,8 +1380,13 @@ struct ipr_ioa_cfg {
1133 1380
1134 char cfg_table_start[8]; 1381 char cfg_table_start[8];
1135#define IPR_CFG_TBL_START "cfg" 1382#define IPR_CFG_TBL_START "cfg"
1136 struct ipr_config_table *cfg_table; 1383 union {
1384 struct ipr_config_table *cfg_table;
1385 struct ipr_config_table64 *cfg_table64;
1386 } u;
1137 dma_addr_t cfg_table_dma; 1387 dma_addr_t cfg_table_dma;
1388 u32 cfg_table_size;
1389 u32 max_devs_supported;
1138 1390
1139 char resource_table_label[8]; 1391 char resource_table_label[8];
1140#define IPR_RES_TABLE_LABEL "res_tbl" 1392#define IPR_RES_TABLE_LABEL "res_tbl"
@@ -1202,13 +1454,17 @@ struct ipr_ioa_cfg {
1202 char ipr_cmd_label[8]; 1454 char ipr_cmd_label[8];
1203#define IPR_CMD_LABEL "ipr_cmd" 1455#define IPR_CMD_LABEL "ipr_cmd"
1204 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1456 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
1205 u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1457 dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
1206}; 1458}; /* struct ipr_ioa_cfg */
1207 1459
1208struct ipr_cmnd { 1460struct ipr_cmnd {
1209 struct ipr_ioarcb ioarcb; 1461 struct ipr_ioarcb ioarcb;
1462 union {
1463 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
1464 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
1465 struct ipr_ata64_ioadl ata_ioadl;
1466 } i;
1210 struct ipr_ioasa ioasa; 1467 struct ipr_ioasa ioasa;
1211 struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
1212 struct list_head queue; 1468 struct list_head queue;
1213 struct scsi_cmnd *scsi_cmd; 1469 struct scsi_cmnd *scsi_cmd;
1214 struct ata_queued_cmd *qc; 1470 struct ata_queued_cmd *qc;
@@ -1221,7 +1477,7 @@ struct ipr_cmnd {
1221 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; 1477 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
1222 dma_addr_t sense_buffer_dma; 1478 dma_addr_t sense_buffer_dma;
1223 unsigned short dma_use_sg; 1479 unsigned short dma_use_sg;
1224 dma_addr_t dma_handle; 1480 dma_addr_t dma_addr;
1225 struct ipr_cmnd *sibling; 1481 struct ipr_cmnd *sibling;
1226 union { 1482 union {
1227 enum ipr_shutdown_type shutdown_type; 1483 enum ipr_shutdown_type shutdown_type;
@@ -1314,8 +1570,6 @@ struct ipr_ioa_dump {
1314 u32 next_page_index; 1570 u32 next_page_index;
1315 u32 page_offset; 1571 u32 page_offset;
1316 u32 format; 1572 u32 format;
1317#define IPR_SDT_FMT2 2
1318#define IPR_SDT_UNKNOWN 3
1319}__attribute__((packed, aligned (4))); 1573}__attribute__((packed, aligned (4)));
1320 1574
1321struct ipr_dump { 1575struct ipr_dump {
@@ -1377,6 +1631,13 @@ struct ipr_ucode_image_header {
1377#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) 1631#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
1378#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) 1632#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
1379 1633
1634#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
1635 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
1636 bus, target, lun, ##__VA_ARGS__)
1637
1638#define ipr_res_err(ioa_cfg, res, fmt, ...) \
1639 ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
1640
1380#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \ 1641#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
1381 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ 1642 printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
1382 (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__) 1643 (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
@@ -1384,9 +1645,6 @@ struct ipr_ucode_image_header {
1384#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \ 1645#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
1385 ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__) 1646 ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
1386 1647
1387#define ipr_res_err(ioa_cfg, res, fmt, ...) \
1388 ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
1389
1390#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ 1648#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
1391{ \ 1649{ \
1392 if ((res).bus >= IPR_MAX_NUM_BUSES) { \ 1650 if ((res).bus >= IPR_MAX_NUM_BUSES) { \
@@ -1399,14 +1657,21 @@ struct ipr_ucode_image_header {
1399} 1657}
1400 1658
1401#define ipr_hcam_err(hostrcb, fmt, ...) \ 1659#define ipr_hcam_err(hostrcb, fmt, ...) \
1402{ \ 1660{ \
1403 if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \ 1661 if (ipr_is_device(hostrcb)) { \
1404 ipr_ra_err((hostrcb)->ioa_cfg, \ 1662 if ((hostrcb)->ioa_cfg->sis64) { \
1405 (hostrcb)->hcam.u.error.failing_dev_res_addr, \ 1663 printk(KERN_ERR IPR_NAME ": %s: " fmt, \
1406 fmt, ##__VA_ARGS__); \ 1664 ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \
1407 } else { \ 1665 &hostrcb->rp_buffer[0]), \
1408 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \ 1666 __VA_ARGS__); \
1409 } \ 1667 } else { \
1668 ipr_ra_err((hostrcb)->ioa_cfg, \
1669 (hostrcb)->hcam.u.error.fd_res_addr, \
1670 fmt, __VA_ARGS__); \
1671 } \
1672 } else { \
1673 dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \
1674 } \
1410} 1675}
1411 1676
1412#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1677#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
@@ -1432,7 +1697,7 @@ ipr_err("----------------------------------------------------------\n")
1432 **/ 1697 **/
1433static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) 1698static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
1434{ 1699{
1435 return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0; 1700 return res->type == IPR_RES_TYPE_IOAFP;
1436} 1701}
1437 1702
1438/** 1703/**
@@ -1444,12 +1709,8 @@ static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
1444 **/ 1709 **/
1445static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) 1710static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
1446{ 1711{
1447 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1712 return res->type == IPR_RES_TYPE_AF_DASD ||
1448 !ipr_is_ioa_resource(res) && 1713 res->type == IPR_RES_TYPE_REMOTE_AF_DASD;
1449 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD)
1450 return 1;
1451 else
1452 return 0;
1453} 1714}
1454 1715
1455/** 1716/**
@@ -1461,12 +1722,7 @@ static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
1461 **/ 1722 **/
1462static inline int ipr_is_vset_device(struct ipr_resource_entry *res) 1723static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
1463{ 1724{
1464 if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && 1725 return res->type == IPR_RES_TYPE_VOLUME_SET;
1465 !ipr_is_ioa_resource(res) &&
1466 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET)
1467 return 1;
1468 else
1469 return 0;
1470} 1726}
1471 1727
1472/** 1728/**
@@ -1478,11 +1734,7 @@ static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
1478 **/ 1734 **/
1479static inline int ipr_is_gscsi(struct ipr_resource_entry *res) 1735static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
1480{ 1736{
1481 if (!ipr_is_ioa_resource(res) && 1737 return res->type == IPR_RES_TYPE_GENERIC_SCSI;
1482 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI)
1483 return 1;
1484 else
1485 return 0;
1486} 1738}
1487 1739
1488/** 1740/**
@@ -1495,7 +1747,7 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
1495static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) 1747static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1496{ 1748{
1497 if (ipr_is_af_dasd_device(res) || 1749 if (ipr_is_af_dasd_device(res) ||
1498 (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))) 1750 (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data)))
1499 return 1; 1751 return 1;
1500 else 1752 else
1501 return 0; 1753 return 0;
@@ -1510,11 +1762,7 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1510 **/ 1762 **/
1511static inline int ipr_is_gata(struct ipr_resource_entry *res) 1763static inline int ipr_is_gata(struct ipr_resource_entry *res)
1512{ 1764{
1513 if (!ipr_is_ioa_resource(res) && 1765 return res->type == IPR_RES_TYPE_GENERIC_ATA;
1514 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
1515 return 1;
1516 else
1517 return 0;
1518} 1766}
1519 1767
1520/** 1768/**
@@ -1526,24 +1774,35 @@ static inline int ipr_is_gata(struct ipr_resource_entry *res)
1526 **/ 1774 **/
1527static inline int ipr_is_naca_model(struct ipr_resource_entry *res) 1775static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
1528{ 1776{
1529 if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL) 1777 if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL)
1530 return 1; 1778 return 1;
1531 return 0; 1779 return 0;
1532} 1780}
1533 1781
1534/** 1782/**
1535 * ipr_is_device - Determine if resource address is that of a device 1783 * ipr_is_device - Determine if the hostrcb structure is related to a device
1536 * @res_addr: resource address struct 1784 * @hostrcb: host resource control blocks struct
1537 * 1785 *
1538 * Return value: 1786 * Return value:
1539 * 1 if AF / 0 if not AF 1787 * 1 if AF / 0 if not AF
1540 **/ 1788 **/
1541static inline int ipr_is_device(struct ipr_res_addr *res_addr) 1789static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
1542{ 1790{
1543 if ((res_addr->bus < IPR_MAX_NUM_BUSES) && 1791 struct ipr_res_addr *res_addr;
1544 (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) 1792 u8 *res_path;
1545 return 1; 1793
1546 1794 if (hostrcb->ioa_cfg->sis64) {
1795 res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
1796 if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
1797 res_path[0] == 0x81) && res_path[2] != 0xFF)
1798 return 1;
1799 } else {
1800 res_addr = &hostrcb->hcam.u.error.fd_res_addr;
1801
1802 if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
1803 (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
1804 return 1;
1805 }
1547 return 0; 1806 return 0;
1548} 1807}
1549 1808
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8a89ba900588..249053a9d4fa 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -874,7 +874,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
874 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 874 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
875 .eh_abort_handler = iscsi_eh_abort, 875 .eh_abort_handler = iscsi_eh_abort,
876 .eh_device_reset_handler= iscsi_eh_device_reset, 876 .eh_device_reset_handler= iscsi_eh_device_reset,
877 .eh_target_reset_handler= iscsi_eh_target_reset, 877 .eh_target_reset_handler = iscsi_eh_recover_target,
878 .use_clustering = DISABLE_CLUSTERING, 878 .use_clustering = DISABLE_CLUSTERING,
879 .slave_alloc = iscsi_sw_tcp_slave_alloc, 879 .slave_alloc = iscsi_sw_tcp_slave_alloc,
880 .slave_configure = iscsi_sw_tcp_slave_configure, 880 .slave_configure = iscsi_sw_tcp_slave_configure,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 703eb6a88790..685eaec53218 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2338,7 +2338,7 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
2338 * This function will wait for a relogin, session termination from 2338 * This function will wait for a relogin, session termination from
2339 * userspace, or a recovery/replacement timeout. 2339 * userspace, or a recovery/replacement timeout.
2340 */ 2340 */
2341static int iscsi_eh_session_reset(struct scsi_cmnd *sc) 2341int iscsi_eh_session_reset(struct scsi_cmnd *sc)
2342{ 2342{
2343 struct iscsi_cls_session *cls_session; 2343 struct iscsi_cls_session *cls_session;
2344 struct iscsi_session *session; 2344 struct iscsi_session *session;
@@ -2389,6 +2389,7 @@ failed:
2389 mutex_unlock(&session->eh_mutex); 2389 mutex_unlock(&session->eh_mutex);
2390 return SUCCESS; 2390 return SUCCESS;
2391} 2391}
2392EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
2392 2393
2393static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) 2394static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2394{ 2395{
@@ -2403,8 +2404,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
2403 * iscsi_eh_target_reset - reset target 2404 * iscsi_eh_target_reset - reset target
2404 * @sc: scsi command 2405 * @sc: scsi command
2405 * 2406 *
2406 * This will attempt to send a warm target reset. If that fails 2407 * This will attempt to send a warm target reset.
2407 * then we will drop the session and attempt ERL0 recovery.
2408 */ 2408 */
2409int iscsi_eh_target_reset(struct scsi_cmnd *sc) 2409int iscsi_eh_target_reset(struct scsi_cmnd *sc)
2410{ 2410{
@@ -2476,12 +2476,27 @@ done:
2476 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, 2476 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
2477 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2477 rc == SUCCESS ? "SUCCESS" : "FAILED");
2478 mutex_unlock(&session->eh_mutex); 2478 mutex_unlock(&session->eh_mutex);
2479 return rc;
2480}
2481EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
2479 2482
2483/**
2484 * iscsi_eh_recover_target - reset target and possibly the session
2485 * @sc: scsi command
2486 *
2487 * This will attempt to send a warm target reset. If that fails,
2488 * we will escalate to ERL0 session recovery.
2489 */
2490int iscsi_eh_recover_target(struct scsi_cmnd *sc)
2491{
2492 int rc;
2493
2494 rc = iscsi_eh_target_reset(sc);
2480 if (rc == FAILED) 2495 if (rc == FAILED)
2481 rc = iscsi_eh_session_reset(sc); 2496 rc = iscsi_eh_session_reset(sc);
2482 return rc; 2497 return rc;
2483} 2498}
2484EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); 2499EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
2485 2500
2486/* 2501/*
2487 * Pre-allocate a pool of @max items of @item_size. By default, the pool 2502 * Pre-allocate a pool of @max items of @item_size. By default, the pool
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 84b696463a58..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
37 the NameServer before giving up. */ 37 the NameServer before giving up. */
38#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ 38#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
39#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ 39#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
40#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
41 cmnd for menlo needs nearly twice as for firmware
42 downloads using bsg */
40#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ 43#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
41#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ 44#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
42#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ 45#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
@@ -509,7 +512,6 @@ struct lpfc_hba {
509 int (*lpfc_hba_down_link) 512 int (*lpfc_hba_down_link)
510 (struct lpfc_hba *); 513 (struct lpfc_hba *);
511 514
512
513 /* SLI4 specific HBA data structure */ 515 /* SLI4 specific HBA data structure */
514 struct lpfc_sli4_hba sli4_hba; 516 struct lpfc_sli4_hba sli4_hba;
515 517
@@ -623,6 +625,9 @@ struct lpfc_hba {
623 uint32_t cfg_log_verbose; 625 uint32_t cfg_log_verbose;
624 uint32_t cfg_aer_support; 626 uint32_t cfg_aer_support;
625 uint32_t cfg_suppress_link_up; 627 uint32_t cfg_suppress_link_up;
628#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
629#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
630#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
626 631
627 lpfc_vpd_t vpd; /* vital product data */ 632 lpfc_vpd_t vpd; /* vital product data */
628 633
@@ -804,6 +809,9 @@ struct lpfc_hba {
804 struct list_head ct_ev_waiters; 809 struct list_head ct_ev_waiters;
805 struct unsol_rcv_ct_ctx ct_ctx[64]; 810 struct unsol_rcv_ct_ctx ct_ctx[64];
806 uint32_t ctx_idx; 811 uint32_t ctx_idx;
812
813 uint8_t menlo_flag; /* menlo generic flags */
814#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
807}; 815};
808 816
809static inline struct Scsi_Host * 817static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c992e8328f9e..64cd17eedb64 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1939,7 +1939,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
1939# 0x2 = never bring up link 1939# 0x2 = never bring up link
1940# Default value is 0. 1940# Default value is 0.
1941*/ 1941*/
1942LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); 1942LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
1943 LPFC_DELAY_INIT_LINK_INDEFINITELY,
1944 "Suppress Link Up at initialization");
1943 1945
1944/* 1946/*
1945# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1947# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1966,8 +1968,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
1966{ 1968{
1967 struct Scsi_Host *shost = class_to_shost(dev); 1969 struct Scsi_Host *shost = class_to_shost(dev);
1968 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1970 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1969 int val = 0; 1971
1970 val = vport->cfg_devloss_tmo;
1971 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); 1972 return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
1972} 1973}
1973 1974
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f3f1bf1a0a71..692c29f6048e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -83,15 +83,28 @@ struct lpfc_bsg_mbox {
83 struct fc_bsg_job *set_job; 83 struct fc_bsg_job *set_job;
84}; 84};
85 85
86#define MENLO_DID 0x0000FC0E
87
88struct lpfc_bsg_menlo {
89 struct lpfc_iocbq *cmdiocbq;
90 struct lpfc_iocbq *rspiocbq;
91 struct lpfc_dmabuf *bmp;
92
93 /* job waiting for this iocb to finish */
94 struct fc_bsg_job *set_job;
95};
96
86#define TYPE_EVT 1 97#define TYPE_EVT 1
87#define TYPE_IOCB 2 98#define TYPE_IOCB 2
88#define TYPE_MBOX 3 99#define TYPE_MBOX 3
100#define TYPE_MENLO 4
89struct bsg_job_data { 101struct bsg_job_data {
90 uint32_t type; 102 uint32_t type;
91 union { 103 union {
92 struct lpfc_bsg_event *evt; 104 struct lpfc_bsg_event *evt;
93 struct lpfc_bsg_iocb iocb; 105 struct lpfc_bsg_iocb iocb;
94 struct lpfc_bsg_mbox mbox; 106 struct lpfc_bsg_mbox mbox;
107 struct lpfc_bsg_menlo menlo;
95 } context_un; 108 } context_un;
96}; 109};
97 110
@@ -2456,6 +2469,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2456 case MBX_PORT_IOV_CONTROL: 2469 case MBX_PORT_IOV_CONTROL:
2457 break; 2470 break;
2458 case MBX_SET_VARIABLE: 2471 case MBX_SET_VARIABLE:
2472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2473 "1226 mbox: set_variable 0x%x, 0x%x\n",
2474 mb->un.varWords[0],
2475 mb->un.varWords[1]);
2476 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
2477 && (mb->un.varWords[1] == 1)) {
2478 phba->wait_4_mlo_maint_flg = 1;
2479 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
2480 phba->link_flag &= ~LS_LOOPBACK_MODE;
2481 phba->fc_topology = TOPOLOGY_PT_PT;
2482 }
2483 break;
2459 case MBX_RUN_BIU_DIAG64: 2484 case MBX_RUN_BIU_DIAG64:
2460 case MBX_READ_EVENT_LOG: 2485 case MBX_READ_EVENT_LOG:
2461 case MBX_READ_SPARM64: 2486 case MBX_READ_SPARM64:
@@ -2638,6 +2663,297 @@ job_error:
2638} 2663}
2639 2664
2640/** 2665/**
2666 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
2667 * @phba: Pointer to HBA context object.
2668 * @cmdiocbq: Pointer to command iocb.
2669 * @rspiocbq: Pointer to response iocb.
2670 *
2671 * This function is the completion handler for iocbs issued using
2672 * lpfc_menlo_cmd function. This function is called by the
2673 * ring event handler function without any lock held. This function
2674 * can be called from both worker thread context and interrupt
2675 * context. This function also can be called from another thread which
2676 * cleans up the SLI layer objects.
2677 * This function copies the contents of the response iocb to the
2678 * response iocb memory object provided by the caller of
2679 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
2680 * sleeps for the iocb completion.
2681 **/
2682static void
2683lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
2684 struct lpfc_iocbq *cmdiocbq,
2685 struct lpfc_iocbq *rspiocbq)
2686{
2687 struct bsg_job_data *dd_data;
2688 struct fc_bsg_job *job;
2689 IOCB_t *rsp;
2690 struct lpfc_dmabuf *bmp;
2691 struct lpfc_bsg_menlo *menlo;
2692 unsigned long flags;
2693 struct menlo_response *menlo_resp;
2694 int rc = 0;
2695
2696 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2697 dd_data = cmdiocbq->context1;
2698 if (!dd_data) {
2699 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2700 return;
2701 }
2702
2703 menlo = &dd_data->context_un.menlo;
2704 job = menlo->set_job;
2705 job->dd_data = NULL; /* so timeout handler does not reply */
2706
2707 spin_lock_irqsave(&phba->hbalock, flags);
2708 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2709 if (cmdiocbq->context2 && rspiocbq)
2710 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2711 &rspiocbq->iocb, sizeof(IOCB_t));
2712 spin_unlock_irqrestore(&phba->hbalock, flags);
2713
2714 bmp = menlo->bmp;
2715 rspiocbq = menlo->rspiocbq;
2716 rsp = &rspiocbq->iocb;
2717
2718 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2719 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2720 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2721 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2722
2723 /* always return the xri, this would be used in the case
2724 * of a menlo download to allow the data to be sent as a continuation
2725 * of the exchange.
2726 */
2727 menlo_resp = (struct menlo_response *)
2728 job->reply->reply_data.vendor_reply.vendor_rsp;
2729 menlo_resp->xri = rsp->ulpContext;
2730 if (rsp->ulpStatus) {
2731 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
2732 switch (rsp->un.ulpWord[4] & 0xff) {
2733 case IOERR_SEQUENCE_TIMEOUT:
2734 rc = -ETIMEDOUT;
2735 break;
2736 case IOERR_INVALID_RPI:
2737 rc = -EFAULT;
2738 break;
2739 default:
2740 rc = -EACCES;
2741 break;
2742 }
2743 } else
2744 rc = -EACCES;
2745 } else
2746 job->reply->reply_payload_rcv_len =
2747 rsp->un.genreq64.bdl.bdeSize;
2748
2749 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2750 lpfc_sli_release_iocbq(phba, rspiocbq);
2751 lpfc_sli_release_iocbq(phba, cmdiocbq);
2752 kfree(bmp);
2753 kfree(dd_data);
2754 /* make error code available to userspace */
2755 job->reply->result = rc;
2756 /* complete the job back to userspace */
2757 job->job_done(job);
2758 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2759 return;
2760}
2761
2762/**
2763 * lpfc_menlo_cmd - send an ioctl for menlo hardware
2764 * @job: fc_bsg_job to handle
2765 *
2766 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
2767 * all the command completions will return the xri for the command.
2768 * For menlo data requests a gen request 64 CX is used to continue the exchange
2769 * supplied in the menlo request header xri field.
2770 **/
2771static int
2772lpfc_menlo_cmd(struct fc_bsg_job *job)
2773{
2774 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2775 struct lpfc_hba *phba = vport->phba;
2776 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2777 IOCB_t *cmd, *rsp;
2778 int rc = 0;
2779 struct menlo_command *menlo_cmd;
2780 struct menlo_response *menlo_resp;
2781 struct lpfc_dmabuf *bmp = NULL;
2782 int request_nseg;
2783 int reply_nseg;
2784 struct scatterlist *sgel = NULL;
2785 int numbde;
2786 dma_addr_t busaddr;
2787 struct bsg_job_data *dd_data;
2788 struct ulp_bde64 *bpl = NULL;
2789
2790 /* in case no data is returned return just the return code */
2791 job->reply->reply_payload_rcv_len = 0;
2792
2793 if (job->request_len <
2794 sizeof(struct fc_bsg_request) +
2795 sizeof(struct menlo_command)) {
2796 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2797 "2784 Received MENLO_CMD request below "
2798 "minimum size\n");
2799 rc = -ERANGE;
2800 goto no_dd_data;
2801 }
2802
2803 if (job->reply_len <
2804 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
2805 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2806 "2785 Received MENLO_CMD reply below "
2807 "minimum size\n");
2808 rc = -ERANGE;
2809 goto no_dd_data;
2810 }
2811
2812 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
2813 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2814 "2786 Adapter does not support menlo "
2815 "commands\n");
2816 rc = -EPERM;
2817 goto no_dd_data;
2818 }
2819
2820 menlo_cmd = (struct menlo_command *)
2821 job->request->rqst_data.h_vendor.vendor_cmd;
2822
2823 menlo_resp = (struct menlo_response *)
2824 job->reply->reply_data.vendor_reply.vendor_rsp;
2825
2826 /* allocate our bsg tracking structure */
2827 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
2828 if (!dd_data) {
2829 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2830 "2787 Failed allocation of dd_data\n");
2831 rc = -ENOMEM;
2832 goto no_dd_data;
2833 }
2834
2835 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2836 if (!bmp) {
2837 rc = -ENOMEM;
2838 goto free_dd;
2839 }
2840
2841 cmdiocbq = lpfc_sli_get_iocbq(phba);
2842 if (!cmdiocbq) {
2843 rc = -ENOMEM;
2844 goto free_bmp;
2845 }
2846
2847 rspiocbq = lpfc_sli_get_iocbq(phba);
2848 if (!rspiocbq) {
2849 rc = -ENOMEM;
2850 goto free_cmdiocbq;
2851 }
2852
2853 rsp = &rspiocbq->iocb;
2854
2855 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
2856 if (!bmp->virt) {
2857 rc = -ENOMEM;
2858 goto free_rspiocbq;
2859 }
2860
2861 INIT_LIST_HEAD(&bmp->list);
2862 bpl = (struct ulp_bde64 *) bmp->virt;
2863 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
2864 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2865 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
2866 busaddr = sg_dma_address(sgel);
2867 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2868 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2869 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2870 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2871 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2872 bpl++;
2873 }
2874
2875 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
2876 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2877 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
2878 busaddr = sg_dma_address(sgel);
2879 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2880 bpl->tus.f.bdeSize = sg_dma_len(sgel);
2881 bpl->tus.w = cpu_to_le32(bpl->tus.w);
2882 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
2883 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
2884 bpl++;
2885 }
2886
2887 cmd = &cmdiocbq->iocb;
2888 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
2889 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
2890 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
2891 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2892 cmd->un.genreq64.bdl.bdeSize =
2893 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
2894 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
2895 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
2896 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
2897 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
2898 cmd->ulpBdeCount = 1;
2899 cmd->ulpClass = CLASS3;
2900 cmd->ulpOwner = OWN_CHIP;
2901 cmd->ulpLe = 1; /* Limited Edition */
2902 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2903 cmdiocbq->vport = phba->pport;
2904 /* We want the firmware to timeout before we do */
2905 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
2906 cmdiocbq->context3 = bmp;
2907 cmdiocbq->context2 = rspiocbq;
2908 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
2909 cmdiocbq->context1 = dd_data;
2910 cmdiocbq->context2 = rspiocbq;
2911 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
2912 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
2913 cmd->ulpPU = MENLO_PU; /* 3 */
2914 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
2915 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
2916 } else {
2917 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
2918 cmd->ulpPU = 1;
2919 cmd->un.ulpWord[4] = 0;
2920 cmd->ulpContext = menlo_cmd->xri;
2921 }
2922
2923 dd_data->type = TYPE_MENLO;
2924 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
2925 dd_data->context_un.menlo.rspiocbq = rspiocbq;
2926 dd_data->context_un.menlo.set_job = job;
2927 dd_data->context_un.menlo.bmp = bmp;
2928
2929 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2930 MENLO_TIMEOUT - 5);
2931 if (rc == IOCB_SUCCESS)
2932 return 0; /* done for now */
2933
2934 /* iocb failed so cleanup */
2935 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
2936 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2937 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
2938 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2939
2940 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2941
2942free_rspiocbq:
2943 lpfc_sli_release_iocbq(phba, rspiocbq);
2944free_cmdiocbq:
2945 lpfc_sli_release_iocbq(phba, cmdiocbq);
2946free_bmp:
2947 kfree(bmp);
2948free_dd:
2949 kfree(dd_data);
2950no_dd_data:
2951 /* make error code available to userspace */
2952 job->reply->result = rc;
2953 job->dd_data = NULL;
2954 return rc;
2955}
2956/**
2641 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 2957 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
2642 * @job: fc_bsg_job to handle 2958 * @job: fc_bsg_job to handle
2643 **/ 2959 **/
@@ -2669,6 +2985,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
2669 case LPFC_BSG_VENDOR_MBOX: 2985 case LPFC_BSG_VENDOR_MBOX:
2670 rc = lpfc_bsg_mbox_cmd(job); 2986 rc = lpfc_bsg_mbox_cmd(job);
2671 break; 2987 break;
2988 case LPFC_BSG_VENDOR_MENLO_CMD:
2989 case LPFC_BSG_VENDOR_MENLO_DATA:
2990 rc = lpfc_menlo_cmd(job);
2991 break;
2672 default: 2992 default:
2673 rc = -EINVAL; 2993 rc = -EINVAL;
2674 job->reply->reply_payload_rcv_len = 0; 2994 job->reply->reply_payload_rcv_len = 0;
@@ -2728,6 +3048,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
2728 struct lpfc_bsg_event *evt; 3048 struct lpfc_bsg_event *evt;
2729 struct lpfc_bsg_iocb *iocb; 3049 struct lpfc_bsg_iocb *iocb;
2730 struct lpfc_bsg_mbox *mbox; 3050 struct lpfc_bsg_mbox *mbox;
3051 struct lpfc_bsg_menlo *menlo;
2731 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3052 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2732 struct bsg_job_data *dd_data; 3053 struct bsg_job_data *dd_data;
2733 unsigned long flags; 3054 unsigned long flags;
@@ -2775,6 +3096,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
2775 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3096 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2776 job->job_done(job); 3097 job->job_done(job);
2777 break; 3098 break;
3099 case TYPE_MENLO:
3100 menlo = &dd_data->context_un.menlo;
3101 cmdiocb = menlo->cmdiocbq;
3102 /* hint to completion handler that the job timed out */
3103 job->reply->result = -EAGAIN;
3104 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3105 /* this will call our completion handler */
3106 spin_lock_irq(&phba->hbalock);
3107 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
3108 spin_unlock_irq(&phba->hbalock);
3109 break;
2778 default: 3110 default:
2779 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3111 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2780 break; 3112 break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 6c8f87e39b98..5bc630819b9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -31,6 +31,8 @@
31#define LPFC_BSG_VENDOR_DIAG_TEST 5 31#define LPFC_BSG_VENDOR_DIAG_TEST 5
32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6 32#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
33#define LPFC_BSG_VENDOR_MBOX 7 33#define LPFC_BSG_VENDOR_MBOX 7
34#define LPFC_BSG_VENDOR_MENLO_CMD 8
35#define LPFC_BSG_VENDOR_MENLO_DATA 9
34 36
35struct set_ct_event { 37struct set_ct_event {
36 uint32_t command; 38 uint32_t command;
@@ -96,3 +98,13 @@ struct dfc_mbox_req {
96 uint8_t mbOffset; 98 uint8_t mbOffset;
97}; 99};
98 100
101/* Used for menlo command or menlo data. The xri is only used for menlo data */
102struct menlo_command {
103 uint32_t cmd;
104 uint32_t xri;
105};
106
107struct menlo_response {
108 uint32_t xri; /* return the xri of the iocb exchange */
109};
110
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 6f0fb51eb461..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *);
63void lpfc_port_link_failure(struct lpfc_vport *); 63void lpfc_port_link_failure(struct lpfc_vport *);
64void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 64void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 65void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
66void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
66void lpfc_retry_pport_discovery(struct lpfc_hba *); 67void lpfc_retry_pport_discovery(struct lpfc_hba *);
67 68
68void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 69void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -221,6 +222,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
221void lpfc_unregister_unused_fcf(struct lpfc_hba *); 222void lpfc_unregister_unused_fcf(struct lpfc_hba *);
222int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); 223int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
223void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); 224void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
225void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
226uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
227int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
228void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
224 229
225int lpfc_mem_alloc(struct lpfc_hba *, int align); 230int lpfc_mem_alloc(struct lpfc_hba *, int align);
226void lpfc_mem_free(struct lpfc_hba *); 231void lpfc_mem_free(struct lpfc_hba *);
@@ -385,7 +390,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
385int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 390int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
386void lpfc_start_fdiscs(struct lpfc_hba *phba); 391void lpfc_start_fdiscs(struct lpfc_hba *phba);
387struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); 392struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
388 393struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
389#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 394#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
390#define HBA_EVENT_RSCN 5 395#define HBA_EVENT_RSCN 5
391#define HBA_EVENT_LINK_UP 2 396#define HBA_EVENT_LINK_UP 2
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2a40a6eabf4d..ee980bd66869 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
771 struct lpfc_nodelist *ndlp = cmdiocb->context1; 771 struct lpfc_nodelist *ndlp = cmdiocb->context1;
772 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 772 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
773 struct serv_parm *sp; 773 struct serv_parm *sp;
774 uint16_t fcf_index;
774 int rc; 775 int rc;
775 776
776 /* Check to see if link went down during discovery */ 777 /* Check to see if link went down during discovery */
@@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
788 vport->port_state); 789 vport->port_state);
789 790
790 if (irsp->ulpStatus) { 791 if (irsp->ulpStatus) {
792 /*
793 * In case of FIP mode, perform round robin FCF failover
794 * due to new FCF discovery
795 */
796 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
797 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
798 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
799 "2611 FLOGI failed on registered "
800 "FCF record fcf_index:%d, trying "
801 "to perform round robin failover\n",
802 phba->fcf.current_rec.fcf_indx);
803 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
804 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
805 /*
806 * Exhausted the eligible FCF record list,
807 * fail through to retry FLOGI on current
808 * FCF record.
809 */
810 lpfc_printf_log(phba, KERN_WARNING,
811 LOG_FIP | LOG_ELS,
812 "2760 FLOGI exhausted FCF "
813 "round robin failover list, "
814 "retry FLOGI on the current "
815 "registered FCF index:%d\n",
816 phba->fcf.current_rec.fcf_indx);
817 spin_lock_irq(&phba->hbalock);
818 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
819 spin_unlock_irq(&phba->hbalock);
820 } else {
821 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
822 fcf_index);
823 if (rc) {
824 lpfc_printf_log(phba, KERN_WARNING,
825 LOG_FIP | LOG_ELS,
826 "2761 FLOGI round "
827 "robin FCF failover "
828 "read FCF failed "
829 "rc:x%x, fcf_index:"
830 "%d\n", rc,
831 phba->fcf.current_rec.fcf_indx);
832 spin_lock_irq(&phba->hbalock);
833 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
834 spin_unlock_irq(&phba->hbalock);
835 } else
836 goto out;
837 }
838 }
839
791 /* Check for retry */ 840 /* Check for retry */
792 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 841 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
793 goto out; 842 goto out;
@@ -806,9 +855,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
806 } 855 }
807 856
808 /* FLOGI failure */ 857 /* FLOGI failure */
809 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 858 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
810 "0100 FLOGI failure Data: x%x x%x " 859 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
811 "x%x\n",
812 irsp->ulpStatus, irsp->un.ulpWord[4], 860 irsp->ulpStatus, irsp->un.ulpWord[4],
813 irsp->ulpTimeout); 861 irsp->ulpTimeout);
814 goto flogifail; 862 goto flogifail;
@@ -842,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
842 else 890 else
843 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 891 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
844 892
845 if (!rc) 893 if (!rc) {
894 /* Mark the FCF discovery process done */
895 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
896 "2769 FLOGI successful on FCF record: "
897 "current_fcf_index:x%x, terminate FCF "
898 "round robin failover process\n",
899 phba->fcf.current_rec.fcf_indx);
900 spin_lock_irq(&phba->hbalock);
901 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
902 spin_unlock_irq(&phba->hbalock);
846 goto out; 903 goto out;
904 }
847 } 905 }
848 906
849flogifail: 907flogifail:
@@ -1409,6 +1467,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1409 goto out; 1467 goto out;
1410 } 1468 }
1411 /* PLOGI failed */ 1469 /* PLOGI failed */
1470 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1471 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1472 ndlp->nlp_DID, irsp->ulpStatus,
1473 irsp->un.ulpWord[4]);
1412 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1474 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1413 if (lpfc_error_lost_link(irsp)) 1475 if (lpfc_error_lost_link(irsp))
1414 rc = NLP_STE_FREED_NODE; 1476 rc = NLP_STE_FREED_NODE;
@@ -1577,6 +1639,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1577 goto out; 1639 goto out;
1578 } 1640 }
1579 /* PRLI failed */ 1641 /* PRLI failed */
1642 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1643 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1644 ndlp->nlp_DID, irsp->ulpStatus,
1645 irsp->un.ulpWord[4]);
1580 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1646 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1581 if (lpfc_error_lost_link(irsp)) 1647 if (lpfc_error_lost_link(irsp))
1582 goto out; 1648 goto out;
@@ -1860,6 +1926,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1860 goto out; 1926 goto out;
1861 } 1927 }
1862 /* ADISC failed */ 1928 /* ADISC failed */
1929 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1930 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
1931 ndlp->nlp_DID, irsp->ulpStatus,
1932 irsp->un.ulpWord[4]);
1863 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1933 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1864 if (!lpfc_error_lost_link(irsp)) 1934 if (!lpfc_error_lost_link(irsp))
1865 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1935 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2009,6 +2079,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2009 /* ELS command is being retried */ 2079 /* ELS command is being retried */
2010 goto out; 2080 goto out;
2011 /* LOGO failed */ 2081 /* LOGO failed */
2082 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2083 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2084 ndlp->nlp_DID, irsp->ulpStatus,
2085 irsp->un.ulpWord[4]);
2012 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2086 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2013 if (lpfc_error_lost_link(irsp)) 2087 if (lpfc_error_lost_link(irsp))
2014 goto out; 2088 goto out;
@@ -5989,7 +6063,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5989 if (phba->sli_rev < LPFC_SLI_REV4) 6063 if (phba->sli_rev < LPFC_SLI_REV4)
5990 lpfc_issue_fabric_reglogin(vport); 6064 lpfc_issue_fabric_reglogin(vport);
5991 else { 6065 else {
5992 lpfc_start_fdiscs(phba); 6066 /*
6067 * If the physical port is instantiated using
6068 * FDISC, do not start vport discovery.
6069 */
6070 if (vport->port_state != LPFC_FDISC)
6071 lpfc_start_fdiscs(phba);
5993 lpfc_do_scr_ns_plogi(phba, vport); 6072 lpfc_do_scr_ns_plogi(phba, vport);
5994 } 6073 }
5995 } else 6074 } else
@@ -6055,21 +6134,18 @@ mbox_err_exit:
6055} 6134}
6056 6135
6057/** 6136/**
6058 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 6137 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6059 * @phba: pointer to lpfc hba data structure. 6138 * @phba: pointer to lpfc hba data structure.
6060 * 6139 *
6061 * This routine abort all pending discovery commands and 6140 * This routine cancels the retry delay timers to all the vports.
6062 * start a timer to retry FLOGI for the physical port
6063 * discovery.
6064 **/ 6141 **/
6065void 6142void
6066lpfc_retry_pport_discovery(struct lpfc_hba *phba) 6143lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6067{ 6144{
6068 struct lpfc_vport **vports; 6145 struct lpfc_vport **vports;
6069 struct lpfc_nodelist *ndlp; 6146 struct lpfc_nodelist *ndlp;
6070 struct Scsi_Host *shost;
6071 int i;
6072 uint32_t link_state; 6147 uint32_t link_state;
6148 int i;
6073 6149
6074 /* Treat this failure as linkdown for all vports */ 6150 /* Treat this failure as linkdown for all vports */
6075 link_state = phba->link_state; 6151 link_state = phba->link_state;
@@ -6087,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6087 } 6163 }
6088 lpfc_destroy_vport_work_array(phba, vports); 6164 lpfc_destroy_vport_work_array(phba, vports);
6089 } 6165 }
6166}
6167
6168/**
6169 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6170 * @phba: pointer to lpfc hba data structure.
6171 *
6172 * This routine abort all pending discovery commands and
6173 * start a timer to retry FLOGI for the physical port
6174 * discovery.
6175 **/
6176void
6177lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6178{
6179 struct lpfc_nodelist *ndlp;
6180 struct Scsi_Host *shost;
6181
6182 /* Cancel the all vports retry delay retry timers */
6183 lpfc_cancel_all_vport_retry_delay_timer(phba);
6090 6184
6091 /* If fabric require FLOGI, then re-instantiate physical login */ 6185 /* If fabric require FLOGI, then re-instantiate physical login */
6092 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 6186 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6093 if (!ndlp) 6187 if (!ndlp)
6094 return; 6188 return;
6095 6189
6096
6097 shost = lpfc_shost_from_vport(phba->pport); 6190 shost = lpfc_shost_from_vport(phba->pport);
6098 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 6191 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6099 spin_lock_irq(shost->host_lock); 6192 spin_lock_irq(shost->host_lock);
@@ -6219,7 +6312,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6219 lpfc_mbx_unreg_vpi(vport); 6312 lpfc_mbx_unreg_vpi(vport);
6220 spin_lock_irq(shost->host_lock); 6313 spin_lock_irq(shost->host_lock);
6221 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 6314 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6222 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 6315 if (phba->sli_rev == LPFC_SLI_REV4)
6316 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6223 spin_unlock_irq(shost->host_lock); 6317 spin_unlock_irq(shost->host_lock);
6224 } 6318 }
6225 6319
@@ -6797,21 +6891,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
6797 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 6891 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
6798 unsigned long iflag = 0; 6892 unsigned long iflag = 0;
6799 6893
6800 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6894 spin_lock_irqsave(&phba->hbalock, iflag);
6895 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
6801 list_for_each_entry_safe(sglq_entry, sglq_next, 6896 list_for_each_entry_safe(sglq_entry, sglq_next,
6802 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 6897 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
6803 if (sglq_entry->sli4_xritag == xri) { 6898 if (sglq_entry->sli4_xritag == xri) {
6804 list_del(&sglq_entry->list); 6899 list_del(&sglq_entry->list);
6805 spin_unlock_irqrestore(
6806 &phba->sli4_hba.abts_sgl_list_lock,
6807 iflag);
6808 spin_lock_irqsave(&phba->hbalock, iflag);
6809
6810 list_add_tail(&sglq_entry->list, 6900 list_add_tail(&sglq_entry->list,
6811 &phba->sli4_hba.lpfc_sgl_list); 6901 &phba->sli4_hba.lpfc_sgl_list);
6902 sglq_entry->state = SGL_FREED;
6903 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6812 spin_unlock_irqrestore(&phba->hbalock, iflag); 6904 spin_unlock_irqrestore(&phba->hbalock, iflag);
6813 return; 6905 return;
6814 } 6906 }
6815 } 6907 }
6816 spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); 6908 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
6909 sglq_entry = __lpfc_get_active_sglq(phba, xri);
6910 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
6911 spin_unlock_irqrestore(&phba->hbalock, iflag);
6912 return;
6913 }
6914 sglq_entry->state = SGL_XRI_ABORTED;
6915 spin_unlock_irqrestore(&phba->hbalock, iflag);
6916 return;
6817} 6917}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2359d0bfb734..c555e3b7f202 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1481,8 +1481,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1481int 1481int
1482lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 1482lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1483{ 1483{
1484 LPFC_MBOXQ_t *mbox;
1485 int rc;
1486 /* 1484 /*
1487 * If the Link is up and no FCoE events while in the 1485 * If the Link is up and no FCoE events while in the
1488 * FCF discovery, no need to restart FCF discovery. 1486 * FCF discovery, no need to restart FCF discovery.
@@ -1491,86 +1489,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1491 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1489 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1492 return 0; 1490 return 0;
1493 1491
1492 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1493 "2768 Pending link or FCF event during current "
1494 "handling of the previous event: link_state:x%x, "
1495 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1496 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1497 phba->fcoe_eventtag);
1498
1494 spin_lock_irq(&phba->hbalock); 1499 spin_lock_irq(&phba->hbalock);
1495 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 1500 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1496 spin_unlock_irq(&phba->hbalock); 1501 spin_unlock_irq(&phba->hbalock);
1497 1502
1498 if (phba->link_state >= LPFC_LINK_UP) 1503 if (phba->link_state >= LPFC_LINK_UP) {
1499 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 1504 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1500 else { 1505 "2780 Restart FCF table scan due to "
1506 "pending FCF event:evt_tag_at_scan:x%x, "
1507 "evt_tag_current:x%x\n",
1508 phba->fcoe_eventtag_at_fcf_scan,
1509 phba->fcoe_eventtag);
1510 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1511 } else {
1501 /* 1512 /*
1502 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS 1513 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
1503 * flag 1514 * flag
1504 */ 1515 */
1505 spin_lock_irq(&phba->hbalock); 1516 spin_lock_irq(&phba->hbalock);
1506 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1517 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1507 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1518 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1508 spin_unlock_irq(&phba->hbalock); 1519 spin_unlock_irq(&phba->hbalock);
1509 } 1520 }
1510 1521
1522 /* Unregister the currently registered FCF if required */
1511 if (unreg_fcf) { 1523 if (unreg_fcf) {
1512 spin_lock_irq(&phba->hbalock); 1524 spin_lock_irq(&phba->hbalock);
1513 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 1525 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1514 spin_unlock_irq(&phba->hbalock); 1526 spin_unlock_irq(&phba->hbalock);
1515 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1527 lpfc_sli4_unregister_fcf(phba);
1516 if (!mbox) {
1517 lpfc_printf_log(phba, KERN_ERR,
1518 LOG_DISCOVERY|LOG_MBOX,
1519 "2610 UNREG_FCFI mbox allocation failed\n");
1520 return 1;
1521 }
1522 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1523 mbox->vport = phba->pport;
1524 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1525 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1526 if (rc == MBX_NOT_FINISHED) {
1527 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1528 "2611 UNREG_FCFI issue mbox failed\n");
1529 mempool_free(mbox, phba->mbox_mem_pool);
1530 }
1531 } 1528 }
1532
1533 return 1; 1529 return 1;
1534} 1530}
1535 1531
1536/** 1532/**
1537 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1533 * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
1538 * @phba: pointer to lpfc hba data structure. 1534 * @phba: pointer to lpfc hba data structure.
1539 * @mboxq: pointer to mailbox object. 1535 * @mboxq: pointer to mailbox object.
1536 * @next_fcf_index: pointer to holder of next fcf index.
1540 * 1537 *
1541 * This function iterate through all the fcf records available in 1538 * This routine parses the non-embedded fcf mailbox command by performing the
1542 * HBA and choose the optimal FCF record for discovery. After finding 1539 * necessarily error checking, non-embedded read FCF record mailbox command
1543 * the FCF for discovery it register the FCF record and kick start 1540 * SGE parsing, and endianness swapping.
1544 * discovery. 1541 *
1545 * If FCF_IN_USE flag is set in currently used FCF, the routine try to 1542 * Returns the pointer to the new FCF record in the non-embedded mailbox
1546 * use a FCF record which match fabric name and mac address of the 1543 * command DMA memory if successfully, other NULL.
1547 * currently used FCF record.
1548 * If the driver support only one FCF, it will try to use the FCF record
1549 * used by BOOT_BIOS.
1550 */ 1544 */
1551void 1545static struct fcf_record *
1552lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1546lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1547 uint16_t *next_fcf_index)
1553{ 1548{
1554 void *virt_addr; 1549 void *virt_addr;
1555 dma_addr_t phys_addr; 1550 dma_addr_t phys_addr;
1556 uint8_t *bytep;
1557 struct lpfc_mbx_sge sge; 1551 struct lpfc_mbx_sge sge;
1558 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1552 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1559 uint32_t shdr_status, shdr_add_status; 1553 uint32_t shdr_status, shdr_add_status;
1560 union lpfc_sli4_cfg_shdr *shdr; 1554 union lpfc_sli4_cfg_shdr *shdr;
1561 struct fcf_record *new_fcf_record; 1555 struct fcf_record *new_fcf_record;
1562 uint32_t boot_flag, addr_mode;
1563 uint32_t next_fcf_index;
1564 struct lpfc_fcf_rec *fcf_rec = NULL;
1565 unsigned long iflags;
1566 uint16_t vlan_id;
1567 int rc;
1568
1569 /* If there is pending FCoE event restart FCF table scan */
1570 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1571 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1572 return;
1573 }
1574 1556
1575 /* Get the first SGE entry from the non-embedded DMA memory. This 1557 /* Get the first SGE entry from the non-embedded DMA memory. This
1576 * routine only uses a single SGE. 1558 * routine only uses a single SGE.
@@ -1581,59 +1563,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1581 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1563 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1582 "2524 Failed to get the non-embedded SGE " 1564 "2524 Failed to get the non-embedded SGE "
1583 "virtual address\n"); 1565 "virtual address\n");
1584 goto out; 1566 return NULL;
1585 } 1567 }
1586 virt_addr = mboxq->sge_array->addr[0]; 1568 virt_addr = mboxq->sge_array->addr[0];
1587 1569
1588 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1570 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1589 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1571 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1590 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 1572 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1591 &shdr->response);
1592 /*
1593 * The FCF Record was read and there is no reason for the driver
1594 * to maintain the FCF record data or memory. Instead, just need
1595 * to book keeping the FCFIs can be used.
1596 */
1597 if (shdr_status || shdr_add_status) { 1573 if (shdr_status || shdr_add_status) {
1598 if (shdr_status == STATUS_FCF_TABLE_EMPTY) { 1574 if (shdr_status == STATUS_FCF_TABLE_EMPTY)
1599 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1575 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1600 "2726 READ_FCF_RECORD Indicates empty " 1576 "2726 READ_FCF_RECORD Indicates empty "
1601 "FCF table.\n"); 1577 "FCF table.\n");
1602 } else { 1578 else
1603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1579 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1604 "2521 READ_FCF_RECORD mailbox failed " 1580 "2521 READ_FCF_RECORD mailbox failed "
1605 "with status x%x add_status x%x, mbx\n", 1581 "with status x%x add_status x%x, "
1606 shdr_status, shdr_add_status); 1582 "mbx\n", shdr_status, shdr_add_status);
1607 } 1583 return NULL;
1608 goto out;
1609 } 1584 }
1610 /* Interpreting the returned information of FCF records */ 1585
1586 /* Interpreting the returned information of the FCF record */
1611 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1587 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1612 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1588 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1613 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1589 sizeof(struct lpfc_mbx_read_fcf_tbl));
1614 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1590 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1615
1616 new_fcf_record = (struct fcf_record *)(virt_addr + 1591 new_fcf_record = (struct fcf_record *)(virt_addr +
1617 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1592 sizeof(struct lpfc_mbx_read_fcf_tbl));
1618 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1593 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1619 sizeof(struct fcf_record)); 1594 sizeof(struct fcf_record));
1620 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
1621 1595
1596 return new_fcf_record;
1597}
1598
1599/**
1600 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1601 * @phba: pointer to lpfc hba data structure.
1602 * @fcf_record: pointer to the fcf record.
1603 * @vlan_id: the lowest vlan identifier associated to this fcf record.
1604 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1605 *
1606 * This routine logs the detailed FCF record if the LOG_FIP loggin is
1607 * enabled.
1608 **/
1609static void
1610lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1611 struct fcf_record *fcf_record,
1612 uint16_t vlan_id,
1613 uint16_t next_fcf_index)
1614{
1615 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1616 "2764 READ_FCF_RECORD:\n"
1617 "\tFCF_Index : x%x\n"
1618 "\tFCF_Avail : x%x\n"
1619 "\tFCF_Valid : x%x\n"
1620 "\tFIP_Priority : x%x\n"
1621 "\tMAC_Provider : x%x\n"
1622 "\tLowest VLANID : x%x\n"
1623 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
1624 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1625 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1626 "\tNext_FCF_Index: x%x\n",
1627 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1628 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1629 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1630 fcf_record->fip_priority,
1631 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1632 vlan_id,
1633 bf_get(lpfc_fcf_record_mac_0, fcf_record),
1634 bf_get(lpfc_fcf_record_mac_1, fcf_record),
1635 bf_get(lpfc_fcf_record_mac_2, fcf_record),
1636 bf_get(lpfc_fcf_record_mac_3, fcf_record),
1637 bf_get(lpfc_fcf_record_mac_4, fcf_record),
1638 bf_get(lpfc_fcf_record_mac_5, fcf_record),
1639 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1640 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1641 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1642 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1643 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1644 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1645 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1646 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1647 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1648 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1649 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1650 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1651 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1652 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1653 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1654 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1655 next_fcf_index);
1656}
1657
1658/**
1659 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
1660 * @phba: pointer to lpfc hba data structure.
1661 * @mboxq: pointer to mailbox object.
1662 *
1663 * This function iterates through all the fcf records available in
1664 * HBA and chooses the optimal FCF record for discovery. After finding
1665 * the FCF for discovery it registers the FCF record and kicks start
1666 * discovery.
1667 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
1668 * use an FCF record which matches fabric name and mac address of the
1669 * currently used FCF record.
1670 * If the driver supports only one FCF, it will try to use the FCF record
1671 * used by BOOT_BIOS.
1672 */
1673void
1674lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1675{
1676 struct fcf_record *new_fcf_record;
1677 uint32_t boot_flag, addr_mode;
1678 uint16_t fcf_index, next_fcf_index;
1679 struct lpfc_fcf_rec *fcf_rec = NULL;
1680 uint16_t vlan_id;
1681 int rc;
1682
1683 /* If there is pending FCoE event restart FCF table scan */
1684 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1685 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1686 return;
1687 }
1688
1689 /* Parse the FCF record from the non-embedded mailbox command */
1690 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1691 &next_fcf_index);
1692 if (!new_fcf_record) {
1693 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1694 "2765 Mailbox command READ_FCF_RECORD "
1695 "failed to retrieve a FCF record.\n");
1696 /* Let next new FCF event trigger fast failover */
1697 spin_lock_irq(&phba->hbalock);
1698 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1699 spin_unlock_irq(&phba->hbalock);
1700 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1701 return;
1702 }
1703
1704 /* Check the FCF record against the connection list */
1622 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 1705 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1623 &addr_mode, &vlan_id); 1706 &addr_mode, &vlan_id);
1707
1708 /* Log the FCF record information if turned on */
1709 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1710 next_fcf_index);
1711
1624 /* 1712 /*
1625 * If the fcf record does not match with connect list entries 1713 * If the fcf record does not match with connect list entries
1626 * read the next entry. 1714 * read the next entry; otherwise, this is an eligible FCF
1715 * record for round robin FCF failover.
1627 */ 1716 */
1628 if (!rc) 1717 if (!rc) {
1718 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1719 "2781 FCF record fcf_index:x%x failed FCF "
1720 "connection list check, fcf_avail:x%x, "
1721 "fcf_valid:x%x\n",
1722 bf_get(lpfc_fcf_record_fcf_index,
1723 new_fcf_record),
1724 bf_get(lpfc_fcf_record_fcf_avail,
1725 new_fcf_record),
1726 bf_get(lpfc_fcf_record_fcf_valid,
1727 new_fcf_record));
1629 goto read_next_fcf; 1728 goto read_next_fcf;
1729 } else {
1730 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1731 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
1732 if (rc)
1733 goto read_next_fcf;
1734 }
1735
1630 /* 1736 /*
1631 * If this is not the first FCF discovery of the HBA, use last 1737 * If this is not the first FCF discovery of the HBA, use last
1632 * FCF record for the discovery. The condition that a rescan 1738 * FCF record for the discovery. The condition that a rescan
1633 * matches the in-use FCF record: fabric name, switch name, mac 1739 * matches the in-use FCF record: fabric name, switch name, mac
1634 * address, and vlan_id. 1740 * address, and vlan_id.
1635 */ 1741 */
1636 spin_lock_irqsave(&phba->hbalock, iflags); 1742 spin_lock_irq(&phba->hbalock);
1637 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1743 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1638 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1744 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
1639 new_fcf_record) && 1745 new_fcf_record) &&
@@ -1649,8 +1755,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1649 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 1755 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
1650 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 1756 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1651 /* If in fast failover, mark it's completed */ 1757 /* If in fast failover, mark it's completed */
1652 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1758 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
1653 spin_unlock_irqrestore(&phba->hbalock, iflags); 1759 FCF_DISCOVERY);
1760 spin_unlock_irq(&phba->hbalock);
1654 goto out; 1761 goto out;
1655 } 1762 }
1656 /* 1763 /*
@@ -1661,7 +1768,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1661 * next candidate. 1768 * next candidate.
1662 */ 1769 */
1663 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 1770 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
1664 spin_unlock_irqrestore(&phba->hbalock, iflags); 1771 spin_unlock_irq(&phba->hbalock);
1665 goto read_next_fcf; 1772 goto read_next_fcf;
1666 } 1773 }
1667 } 1774 }
@@ -1669,14 +1776,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1669 * Update on failover FCF record only if it's in FCF fast-failover 1776 * Update on failover FCF record only if it's in FCF fast-failover
1670 * period; otherwise, update on current FCF record. 1777 * period; otherwise, update on current FCF record.
1671 */ 1778 */
1672 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { 1779 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
1673 /* Fast FCF failover only to the same fabric name */ 1780 fcf_rec = &phba->fcf.failover_rec;
1674 if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, 1781 else
1675 new_fcf_record))
1676 fcf_rec = &phba->fcf.failover_rec;
1677 else
1678 goto read_next_fcf;
1679 } else
1680 fcf_rec = &phba->fcf.current_rec; 1782 fcf_rec = &phba->fcf.current_rec;
1681 1783
1682 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 1784 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
@@ -1689,7 +1791,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1689 /* Choose this FCF record */ 1791 /* Choose this FCF record */
1690 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1792 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1691 addr_mode, vlan_id, BOOT_ENABLE); 1793 addr_mode, vlan_id, BOOT_ENABLE);
1692 spin_unlock_irqrestore(&phba->hbalock, iflags); 1794 spin_unlock_irq(&phba->hbalock);
1693 goto read_next_fcf; 1795 goto read_next_fcf;
1694 } 1796 }
1695 /* 1797 /*
@@ -1698,20 +1800,19 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1698 * the next FCF record. 1800 * the next FCF record.
1699 */ 1801 */
1700 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 1802 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
1701 spin_unlock_irqrestore(&phba->hbalock, iflags); 1803 spin_unlock_irq(&phba->hbalock);
1702 goto read_next_fcf; 1804 goto read_next_fcf;
1703 } 1805 }
1704 /* 1806 /*
1705 * If the new hba FCF record has lower priority value 1807 * If the new hba FCF record has lower priority value
1706 * than the driver FCF record, use the new record. 1808 * than the driver FCF record, use the new record.
1707 */ 1809 */
1708 if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && 1810 if (new_fcf_record->fip_priority < fcf_rec->priority) {
1709 (new_fcf_record->fip_priority < fcf_rec->priority)) {
1710 /* Choose this FCF record */ 1811 /* Choose this FCF record */
1711 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 1812 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
1712 addr_mode, vlan_id, 0); 1813 addr_mode, vlan_id, 0);
1713 } 1814 }
1714 spin_unlock_irqrestore(&phba->hbalock, iflags); 1815 spin_unlock_irq(&phba->hbalock);
1715 goto read_next_fcf; 1816 goto read_next_fcf;
1716 } 1817 }
1717 /* 1818 /*
@@ -1724,7 +1825,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1724 BOOT_ENABLE : 0)); 1825 BOOT_ENABLE : 0));
1725 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1826 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1726 } 1827 }
1727 spin_unlock_irqrestore(&phba->hbalock, iflags); 1828 spin_unlock_irq(&phba->hbalock);
1728 goto read_next_fcf; 1829 goto read_next_fcf;
1729 1830
1730read_next_fcf: 1831read_next_fcf:
@@ -1740,9 +1841,22 @@ read_next_fcf:
1740 * FCF scan inprogress, and do nothing 1841 * FCF scan inprogress, and do nothing
1741 */ 1842 */
1742 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 1843 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
1743 spin_lock_irqsave(&phba->hbalock, iflags); 1844 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1845 "2782 No suitable FCF record "
1846 "found during this round of "
1847 "post FCF rediscovery scan: "
1848 "fcf_evt_tag:x%x, fcf_index: "
1849 "x%x\n",
1850 phba->fcoe_eventtag_at_fcf_scan,
1851 bf_get(lpfc_fcf_record_fcf_index,
1852 new_fcf_record));
1853 /*
1854 * Let next new FCF event trigger fast
1855 * failover
1856 */
1857 spin_lock_irq(&phba->hbalock);
1744 phba->hba_flag &= ~FCF_DISC_INPROGRESS; 1858 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1745 spin_unlock_irqrestore(&phba->hbalock, iflags); 1859 spin_unlock_irq(&phba->hbalock);
1746 return; 1860 return;
1747 } 1861 }
1748 /* 1862 /*
@@ -1754,16 +1868,23 @@ read_next_fcf:
1754 * record. 1868 * record.
1755 */ 1869 */
1756 1870
1757 /* unregister the current in-use FCF record */ 1871 /* Unregister the current in-use FCF record */
1758 lpfc_unregister_fcf(phba); 1872 lpfc_unregister_fcf(phba);
1759 /* replace in-use record with the new record */ 1873
1874 /* Replace in-use record with the new record */
1760 memcpy(&phba->fcf.current_rec, 1875 memcpy(&phba->fcf.current_rec,
1761 &phba->fcf.failover_rec, 1876 &phba->fcf.failover_rec,
1762 sizeof(struct lpfc_fcf_rec)); 1877 sizeof(struct lpfc_fcf_rec));
1763 /* mark the FCF fast failover completed */ 1878 /* mark the FCF fast failover completed */
1764 spin_lock_irqsave(&phba->hbalock, iflags); 1879 spin_lock_irq(&phba->hbalock);
1765 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 1880 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
1766 spin_unlock_irqrestore(&phba->hbalock, iflags); 1881 spin_unlock_irq(&phba->hbalock);
1882 /*
1883 * Set up the initial registered FCF index for FLOGI
1884 * round robin FCF failover.
1885 */
1886 phba->fcf.fcf_rr_init_indx =
1887 phba->fcf.failover_rec.fcf_indx;
1767 /* Register to the new FCF record */ 1888 /* Register to the new FCF record */
1768 lpfc_register_fcf(phba); 1889 lpfc_register_fcf(phba);
1769 } else { 1890 } else {
@@ -1776,13 +1897,25 @@ read_next_fcf:
1776 return; 1897 return;
1777 /* 1898 /*
1778 * Otherwise, initial scan or post linkdown rescan, 1899 * Otherwise, initial scan or post linkdown rescan,
1779 * register with the best fit FCF record found so 1900 * register with the best FCF record found so far
1780 * far through the scanning process. 1901 * through the FCF scanning process.
1902 */
1903
1904 /* mark the initial FCF discovery completed */
1905 spin_lock_irq(&phba->hbalock);
1906 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
1907 spin_unlock_irq(&phba->hbalock);
1908 /*
1909 * Set up the initial registered FCF index for FLOGI
1910 * round robin FCF failover
1781 */ 1911 */
1912 phba->fcf.fcf_rr_init_indx =
1913 phba->fcf.current_rec.fcf_indx;
1914 /* Register to the new FCF record */
1782 lpfc_register_fcf(phba); 1915 lpfc_register_fcf(phba);
1783 } 1916 }
1784 } else 1917 } else
1785 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1918 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
1786 return; 1919 return;
1787 1920
1788out: 1921out:
@@ -1793,6 +1926,141 @@ out:
1793} 1926}
1794 1927
1795/** 1928/**
1929 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
1930 * @phba: pointer to lpfc hba data structure.
1931 * @mboxq: pointer to mailbox object.
1932 *
1933 * This is the callback function for FLOGI failure round robin FCF failover
1934 * read FCF record mailbox command from the eligible FCF record bmask for
1935 * performing the failover. If the FCF read back is not valid/available, it
1936 * fails through to retrying FLOGI to the currently registered FCF again.
1937 * Otherwise, if the FCF read back is valid and available, it will set the
1938 * newly read FCF record to the failover FCF record, unregister currently
1939 * registered FCF record, copy the failover FCF record to the current
1940 * FCF record, and then register the current FCF record before proceeding
1941 * to trying FLOGI on the new failover FCF.
1942 */
1943void
1944lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1945{
1946 struct fcf_record *new_fcf_record;
1947 uint32_t boot_flag, addr_mode;
1948 uint16_t next_fcf_index;
1949 uint16_t current_fcf_index;
1950 uint16_t vlan_id;
1951
1952 /* If link state is not up, stop the round robin failover process */
1953 if (phba->link_state < LPFC_LINK_UP) {
1954 spin_lock_irq(&phba->hbalock);
1955 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1956 spin_unlock_irq(&phba->hbalock);
1957 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1958 return;
1959 }
1960
1961 /* Parse the FCF record from the non-embedded mailbox command */
1962 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
1963 &next_fcf_index);
1964 if (!new_fcf_record) {
1965 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
1966 "2766 Mailbox command READ_FCF_RECORD "
1967 "failed to retrieve a FCF record.\n");
1968 goto out;
1969 }
1970
1971 /* Get the needed parameters from FCF record */
1972 lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
1973 &addr_mode, &vlan_id);
1974
1975 /* Log the FCF record information if turned on */
1976 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
1977 next_fcf_index);
1978
1979 /* Upload new FCF record to the failover FCF record */
1980 spin_lock_irq(&phba->hbalock);
1981 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
1982 new_fcf_record, addr_mode, vlan_id,
1983 (boot_flag ? BOOT_ENABLE : 0));
1984 spin_unlock_irq(&phba->hbalock);
1985
1986 current_fcf_index = phba->fcf.current_rec.fcf_indx;
1987
1988 /* Unregister the current in-use FCF record */
1989 lpfc_unregister_fcf(phba);
1990
1991 /* Replace in-use record with the new record */
1992 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
1993 sizeof(struct lpfc_fcf_rec));
1994
1995 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1996 "2783 FLOGI round robin FCF failover from FCF "
1997 "(index:x%x) to FCF (index:x%x).\n",
1998 current_fcf_index,
1999 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
2000
2001out:
2002 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2003 lpfc_register_fcf(phba);
2004}
2005
2006/**
2007 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2008 * @phba: pointer to lpfc hba data structure.
2009 * @mboxq: pointer to mailbox object.
2010 *
2011 * This is the callback function of read FCF record mailbox command for
2012 * updating the eligible FCF bmask for FLOGI failure round robin FCF
2013 * failover when a new FCF event happened. If the FCF read back is
2014 * valid/available and it passes the connection list check, it updates
2015 * the bmask for the eligible FCF record for round robin failover.
2016 */
2017void
2018lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2019{
2020 struct fcf_record *new_fcf_record;
2021 uint32_t boot_flag, addr_mode;
2022 uint16_t fcf_index, next_fcf_index;
2023 uint16_t vlan_id;
2024 int rc;
2025
2026 /* If link state is not up, no need to proceed */
2027 if (phba->link_state < LPFC_LINK_UP)
2028 goto out;
2029
2030 /* If FCF discovery period is over, no need to proceed */
2031 if (phba->fcf.fcf_flag & FCF_DISCOVERY)
2032 goto out;
2033
2034 /* Parse the FCF record from the non-embedded mailbox command */
2035 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2036 &next_fcf_index);
2037 if (!new_fcf_record) {
2038 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2039 "2767 Mailbox command READ_FCF_RECORD "
2040 "failed to retrieve a FCF record.\n");
2041 goto out;
2042 }
2043
2044 /* Check the connection list for eligibility */
2045 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2046 &addr_mode, &vlan_id);
2047
2048 /* Log the FCF record information if turned on */
2049 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2050 next_fcf_index);
2051
2052 if (!rc)
2053 goto out;
2054
2055 /* Update the eligible FCF record index bmask */
2056 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2057 rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
2058
2059out:
2060 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2061}
2062
2063/**
1796 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2064 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1797 * @phba: pointer to lpfc hba data structure. 2065 * @phba: pointer to lpfc hba data structure.
1798 * @mboxq: pointer to mailbox data structure. 2066 * @mboxq: pointer to mailbox data structure.
@@ -2024,8 +2292,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2024 int rc; 2292 int rc;
2025 struct fcf_record *fcf_record; 2293 struct fcf_record *fcf_record;
2026 2294
2027 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2028
2029 spin_lock_irq(&phba->hbalock); 2295 spin_lock_irq(&phba->hbalock);
2030 switch (la->UlnkSpeed) { 2296 switch (la->UlnkSpeed) {
2031 case LA_1GHZ_LINK: 2297 case LA_1GHZ_LINK:
@@ -2117,18 +2383,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2117 spin_unlock_irq(&phba->hbalock); 2383 spin_unlock_irq(&phba->hbalock);
2118 2384
2119 lpfc_linkup(phba); 2385 lpfc_linkup(phba);
2120 if (sparam_mbox) { 2386 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2121 lpfc_read_sparam(phba, sparam_mbox, 0); 2387 if (!sparam_mbox)
2122 sparam_mbox->vport = vport; 2388 goto out;
2123 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 2389
2124 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 2390 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
2125 if (rc == MBX_NOT_FINISHED) { 2391 if (rc) {
2126 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 2392 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2127 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2393 goto out;
2128 kfree(mp); 2394 }
2129 mempool_free(sparam_mbox, phba->mbox_mem_pool); 2395 sparam_mbox->vport = vport;
2130 goto out; 2396 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
2131 } 2397 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
2398 if (rc == MBX_NOT_FINISHED) {
2399 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
2400 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2401 kfree(mp);
2402 mempool_free(sparam_mbox, phba->mbox_mem_pool);
2403 goto out;
2132 } 2404 }
2133 2405
2134 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 2406 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -2186,10 +2458,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
2186 spin_unlock_irq(&phba->hbalock); 2458 spin_unlock_irq(&phba->hbalock);
2187 return; 2459 return;
2188 } 2460 }
2461 /* This is the initial FCF discovery scan */
2462 phba->fcf.fcf_flag |= FCF_INIT_DISC;
2189 spin_unlock_irq(&phba->hbalock); 2463 spin_unlock_irq(&phba->hbalock);
2190 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 2464 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2191 if (rc) 2465 "2778 Start FCF table scan at linkup\n");
2466
2467 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2468 LPFC_FCOE_FCF_GET_FIRST);
2469 if (rc) {
2470 spin_lock_irq(&phba->hbalock);
2471 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
2472 spin_unlock_irq(&phba->hbalock);
2192 goto out; 2473 goto out;
2474 }
2193 } 2475 }
2194 2476
2195 return; 2477 return;
@@ -3379,8 +3661,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
3379 shost = lpfc_shost_from_vport(vports[i]); 3661 shost = lpfc_shost_from_vport(vports[i]);
3380 spin_lock_irq(shost->host_lock); 3662 spin_lock_irq(shost->host_lock);
3381 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 3663 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3382 if (ndlp->nlp_flag & NLP_RPI_VALID) 3664 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3665 /* The mempool_alloc might sleep */
3666 spin_unlock_irq(shost->host_lock);
3383 lpfc_unreg_rpi(vports[i], ndlp); 3667 lpfc_unreg_rpi(vports[i], ndlp);
3668 spin_lock_irq(shost->host_lock);
3669 }
3384 } 3670 }
3385 spin_unlock_irq(shost->host_lock); 3671 spin_unlock_irq(shost->host_lock);
3386 } 3672 }
@@ -4756,6 +5042,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4756 return; 5042 return;
4757 /* Reset HBA FCF states after successful unregister FCF */ 5043 /* Reset HBA FCF states after successful unregister FCF */
4758 phba->fcf.fcf_flag = 0; 5044 phba->fcf.fcf_flag = 0;
5045 phba->fcf.current_rec.flag = 0;
4759 5046
4760 /* 5047 /*
4761 * If driver is not unloading, check if there is any other 5048 * If driver is not unloading, check if there is any other
@@ -4765,13 +5052,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
4765 (phba->link_state < LPFC_LINK_UP)) 5052 (phba->link_state < LPFC_LINK_UP))
4766 return; 5053 return;
4767 5054
4768 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 5055 /* This is considered as the initial FCF discovery scan */
5056 spin_lock_irq(&phba->hbalock);
5057 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5058 spin_unlock_irq(&phba->hbalock);
5059 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
4769 5060
4770 if (rc) 5061 if (rc) {
5062 spin_lock_irq(&phba->hbalock);
5063 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5064 spin_unlock_irq(&phba->hbalock);
4771 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5065 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
4772 "2553 lpfc_unregister_unused_fcf failed " 5066 "2553 lpfc_unregister_unused_fcf failed "
4773 "to read FCF record HBA state x%x\n", 5067 "to read FCF record HBA state x%x\n",
4774 phba->pport->port_state); 5068 phba->pport->port_state);
5069 }
4775} 5070}
4776 5071
4777/** 5072/**
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c317d9..ea44239eeb33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -350,7 +350,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
350 mb = &pmb->u.mb; 350 mb = &pmb->u.mb;
351 351
352 /* Get login parameters for NID. */ 352 /* Get login parameters for NID. */
353 lpfc_read_sparam(phba, pmb, 0); 353 rc = lpfc_read_sparam(phba, pmb, 0);
354 if (rc) {
355 mempool_free(pmb, phba->mbox_mem_pool);
356 return -ENOMEM;
357 }
358
354 pmb->vport = vport; 359 pmb->vport = vport;
355 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 360 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
359 mb->mbxCommand, mb->mbxStatus); 364 mb->mbxCommand, mb->mbxStatus);
360 phba->link_state = LPFC_HBA_ERROR; 365 phba->link_state = LPFC_HBA_ERROR;
361 mp = (struct lpfc_dmabuf *) pmb->context1; 366 mp = (struct lpfc_dmabuf *) pmb->context1;
362 mempool_free( pmb, phba->mbox_mem_pool); 367 mempool_free(pmb, phba->mbox_mem_pool);
363 lpfc_mbuf_free(phba, mp->virt, mp->phys); 368 lpfc_mbuf_free(phba, mp->virt, mp->phys);
364 kfree(mp); 369 kfree(mp);
365 return -EIO; 370 return -EIO;
@@ -544,7 +549,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
544 mempool_free(pmb, phba->mbox_mem_pool); 549 mempool_free(pmb, phba->mbox_mem_pool);
545 return -EIO; 550 return -EIO;
546 } 551 }
547 } else if (phba->cfg_suppress_link_up == 0) { 552 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
548 lpfc_init_link(phba, pmb, phba->cfg_topology, 553 lpfc_init_link(phba, pmb, phba->cfg_topology,
549 phba->cfg_link_speed); 554 phba->cfg_link_speed);
550 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 555 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +576,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
571 } 576 }
572 /* MBOX buffer will be freed in mbox compl */ 577 /* MBOX buffer will be freed in mbox compl */
573 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 578 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
579 if (!pmb) {
580 phba->link_state = LPFC_HBA_ERROR;
581 return -ENOMEM;
582 }
583
574 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 584 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
575 pmb->mbox_cmpl = lpfc_config_async_cmpl; 585 pmb->mbox_cmpl = lpfc_config_async_cmpl;
576 pmb->vport = phba->pport; 586 pmb->vport = phba->pport;
@@ -588,6 +598,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
588 598
589 /* Get Option rom version */ 599 /* Get Option rom version */
590 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 600 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
601 if (!pmb) {
602 phba->link_state = LPFC_HBA_ERROR;
603 return -ENOMEM;
604 }
605
591 lpfc_dump_wakeup_param(phba, pmb); 606 lpfc_dump_wakeup_param(phba, pmb);
592 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 607 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
593 pmb->vport = phba->pport; 608 pmb->vport = phba->pport;
@@ -652,7 +667,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
652 mempool_free(pmb, phba->mbox_mem_pool); 667 mempool_free(pmb, phba->mbox_mem_pool);
653 return -EIO; 668 return -EIO;
654 } 669 }
655 phba->cfg_suppress_link_up = 0; 670 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
656 671
657 return 0; 672 return 0;
658} 673}
@@ -807,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
807 LIST_HEAD(aborts); 822 LIST_HEAD(aborts);
808 int ret; 823 int ret;
809 unsigned long iflag = 0; 824 unsigned long iflag = 0;
825 struct lpfc_sglq *sglq_entry = NULL;
826
810 ret = lpfc_hba_down_post_s3(phba); 827 ret = lpfc_hba_down_post_s3(phba);
811 if (ret) 828 if (ret)
812 return ret; 829 return ret;
@@ -822,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
822 * list. 839 * list.
823 */ 840 */
824 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 841 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
842 list_for_each_entry(sglq_entry,
843 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
844 sglq_entry->state = SGL_FREED;
845
825 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 846 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
826 &phba->sli4_hba.lpfc_sgl_list); 847 &phba->sli4_hba.lpfc_sgl_list);
827 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 848 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2199,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
2178void 2199void
2179__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2200__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2180{ 2201{
2181 /* Clear pending FCF rediscovery wait timer */ 2202 /* Clear pending FCF rediscovery wait and failover in progress flags */
2182 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2203 phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
2204 FCF_DEAD_DISC |
2205 FCF_ACVL_DISC);
2183 /* Now, try to stop the timer */ 2206 /* Now, try to stop the timer */
2184 del_timer(&phba->fcf.redisc_wait); 2207 del_timer(&phba->fcf.redisc_wait);
2185} 2208}
@@ -2576,6 +2599,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2576 init_timer(&vport->els_tmofunc); 2599 init_timer(&vport->els_tmofunc);
2577 vport->els_tmofunc.function = lpfc_els_timeout; 2600 vport->els_tmofunc.function = lpfc_els_timeout;
2578 vport->els_tmofunc.data = (unsigned long)vport; 2601 vport->els_tmofunc.data = (unsigned long)vport;
2602 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
2603 phba->menlo_flag |= HBA_MENLO_SUPPORT;
2604 /* check for menlo minimum sg count */
2605 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
2606 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
2607 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2608 }
2609 }
2579 2610
2580 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2611 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2581 if (error) 2612 if (error)
@@ -2912,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
2912 /* FCF rediscovery event to worker thread */ 2943 /* FCF rediscovery event to worker thread */
2913 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 2944 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
2914 spin_unlock_irq(&phba->hbalock); 2945 spin_unlock_irq(&phba->hbalock);
2946 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2947 "2776 FCF rediscover wait timer expired, post "
2948 "a worker thread event for FCF table scan\n");
2915 /* wake up worker thread */ 2949 /* wake up worker thread */
2916 lpfc_worker_wake_up(phba); 2950 lpfc_worker_wake_up(phba);
2917} 2951}
@@ -3183,6 +3217,68 @@ out_free_pmb:
3183} 3217}
3184 3218
3185/** 3219/**
3220 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3221 * @vport: pointer to vport data structure.
3222 *
3223 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3224 * response to a CVL event.
3225 *
3226 * Return the pointer to the ndlp with the vport if successful, otherwise
3227 * return NULL.
3228 **/
3229static struct lpfc_nodelist *
3230lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3231{
3232 struct lpfc_nodelist *ndlp;
3233 struct Scsi_Host *shost;
3234 struct lpfc_hba *phba;
3235
3236 if (!vport)
3237 return NULL;
3238 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3239 if (!ndlp)
3240 return NULL;
3241 phba = vport->phba;
3242 if (!phba)
3243 return NULL;
3244 if (phba->pport->port_state <= LPFC_FLOGI)
3245 return NULL;
3246 /* If virtual link is not yet instantiated ignore CVL */
3247 if (vport->port_state <= LPFC_FDISC)
3248 return NULL;
3249 shost = lpfc_shost_from_vport(vport);
3250 if (!shost)
3251 return NULL;
3252 lpfc_linkdown_port(vport);
3253 lpfc_cleanup_pending_mbox(vport);
3254 spin_lock_irq(shost->host_lock);
3255 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3256 spin_unlock_irq(shost->host_lock);
3257
3258 return ndlp;
3259}
3260
3261/**
3262 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3263 * @vport: pointer to lpfc hba data structure.
3264 *
3265 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3266 * response to a FCF dead event.
3267 **/
3268static void
3269lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3270{
3271 struct lpfc_vport **vports;
3272 int i;
3273
3274 vports = lpfc_create_vport_work_array(phba);
3275 if (vports)
3276 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3277 lpfc_sli4_perform_vport_cvl(vports[i]);
3278 lpfc_destroy_vport_work_array(phba, vports);
3279}
3280
3281/**
3186 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event 3282 * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
3187 * @phba: pointer to lpfc hba data structure. 3283 * @phba: pointer to lpfc hba data structure.
3188 * @acqe_link: pointer to the async fcoe completion queue entry. 3284 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3294,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3198 struct lpfc_vport *vport; 3294 struct lpfc_vport *vport;
3199 struct lpfc_nodelist *ndlp; 3295 struct lpfc_nodelist *ndlp;
3200 struct Scsi_Host *shost; 3296 struct Scsi_Host *shost;
3201 uint32_t link_state;
3202 int active_vlink_present; 3297 int active_vlink_present;
3203 struct lpfc_vport **vports; 3298 struct lpfc_vport **vports;
3204 int i; 3299 int i;
@@ -3208,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3208 switch (event_type) { 3303 switch (event_type) {
3209 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 3304 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
3210 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: 3305 case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
3211 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3306 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3212 "2546 New FCF found index 0x%x tag 0x%x\n", 3307 "2546 New FCF found/FCF parameter modified event: "
3213 acqe_fcoe->index, 3308 "evt_tag:x%x, fcf_index:x%x\n",
3214 acqe_fcoe->event_tag); 3309 acqe_fcoe->event_tag, acqe_fcoe->index);
3310
3215 spin_lock_irq(&phba->hbalock); 3311 spin_lock_irq(&phba->hbalock);
3216 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || 3312 if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
3217 (phba->hba_flag & FCF_DISC_INPROGRESS)) { 3313 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3222 spin_unlock_irq(&phba->hbalock); 3318 spin_unlock_irq(&phba->hbalock);
3223 break; 3319 break;
3224 } 3320 }
3321
3225 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3322 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3226 /* 3323 /*
3227 * If fast FCF failover rescan event is pending, 3324 * If fast FCF failover rescan event is pending,
@@ -3232,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3232 } 3329 }
3233 spin_unlock_irq(&phba->hbalock); 3330 spin_unlock_irq(&phba->hbalock);
3234 3331
3235 /* Read the FCF table and re-discover SAN. */ 3332 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
3236 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3333 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
3334 /*
3335 * During period of FCF discovery, read the FCF
3336 * table record indexed by the event to update
3337 * FCF round robin failover eligible FCF bmask.
3338 */
3339 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3340 LOG_DISCOVERY,
3341 "2779 Read new FCF record with "
3342 "fcf_index:x%x for updating FCF "
3343 "round robin failover bmask\n",
3344 acqe_fcoe->index);
3345 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
3346 }
3347
3348 /* Otherwise, scan the entire FCF table and re-discover SAN */
3349 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3350 "2770 Start FCF table scan due to new FCF "
3351 "event: evt_tag:x%x, fcf_index:x%x\n",
3352 acqe_fcoe->event_tag, acqe_fcoe->index);
3353 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3354 LPFC_FCOE_FCF_GET_FIRST);
3237 if (rc) 3355 if (rc)
3238 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3356 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3239 "2547 Read FCF record failed 0x%x\n", 3357 "2547 Issue FCF scan read FCF mailbox "
3240 rc); 3358 "command failed 0x%x\n", rc);
3241 break; 3359 break;
3242 3360
3243 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 3361 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3366,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3248 break; 3366 break;
3249 3367
3250 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 3368 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
3251 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3369 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3252 "2549 FCF disconnected from network index 0x%x" 3370 "2549 FCF disconnected from network index 0x%x"
3253 " tag 0x%x\n", acqe_fcoe->index, 3371 " tag 0x%x\n", acqe_fcoe->index,
3254 acqe_fcoe->event_tag); 3372 acqe_fcoe->event_tag);
3255 /* If the event is not for currently used fcf do nothing */ 3373 /* If the event is not for currently used fcf do nothing */
3256 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) 3374 if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
3257 break; 3375 break;
3258 /* 3376 /* We request port to rediscover the entire FCF table for
3259 * Currently, driver support only one FCF - so treat this as 3377 * a fast recovery from case that the current FCF record
3260 * a link down, but save the link state because we don't want 3378 * is no longer valid if we are not in the middle of FCF
3261 * it to be changed to Link Down unless it is already down. 3379 * failover process already.
3262 */ 3380 */
3263 link_state = phba->link_state; 3381 spin_lock_irq(&phba->hbalock);
3264 lpfc_linkdown(phba); 3382 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3265 phba->link_state = link_state; 3383 spin_unlock_irq(&phba->hbalock);
3266 /* Unregister FCF if no devices connected to it */ 3384 /* Update FLOGI FCF failover eligible FCF bmask */
3267 lpfc_unregister_unused_fcf(phba); 3385 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
3386 break;
3387 }
3388 /* Mark the fast failover process in progress */
3389 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3390 spin_unlock_irq(&phba->hbalock);
3391 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3392 "2771 Start FCF fast failover process due to "
3393 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3394 "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
3395 rc = lpfc_sli4_redisc_fcf_table(phba);
3396 if (rc) {
3397 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3398 LOG_DISCOVERY,
3399 "2772 Issue FCF rediscover mabilbox "
3400 "command failed, fail through to FCF "
3401 "dead event\n");
3402 spin_lock_irq(&phba->hbalock);
3403 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3404 spin_unlock_irq(&phba->hbalock);
3405 /*
3406 * Last resort will fail over by treating this
3407 * as a link down to FCF registration.
3408 */
3409 lpfc_sli4_fcf_dead_failthrough(phba);
3410 } else
3411 /* Handling fast FCF failover to a DEAD FCF event
3412 * is considered equalivant to receiving CVL to all
3413 * vports.
3414 */
3415 lpfc_sli4_perform_all_vport_cvl(phba);
3268 break; 3416 break;
3269 case LPFC_FCOE_EVENT_TYPE_CVL: 3417 case LPFC_FCOE_EVENT_TYPE_CVL:
3270 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3418 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3271 "2718 Clear Virtual Link Received for VPI 0x%x" 3419 "2718 Clear Virtual Link Received for VPI 0x%x"
3272 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); 3420 " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
3273 vport = lpfc_find_vport_by_vpid(phba, 3421 vport = lpfc_find_vport_by_vpid(phba,
3274 acqe_fcoe->index - phba->vpi_base); 3422 acqe_fcoe->index - phba->vpi_base);
3275 if (!vport) 3423 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3276 break;
3277 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3278 if (!ndlp) 3424 if (!ndlp)
3279 break; 3425 break;
3280 shost = lpfc_shost_from_vport(vport);
3281 if (phba->pport->port_state <= LPFC_FLOGI)
3282 break;
3283 /* If virtual link is not yet instantiated ignore CVL */
3284 if (vport->port_state <= LPFC_FDISC)
3285 break;
3286
3287 lpfc_linkdown_port(vport);
3288 lpfc_cleanup_pending_mbox(vport);
3289 spin_lock_irq(shost->host_lock);
3290 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3291 spin_unlock_irq(shost->host_lock);
3292 active_vlink_present = 0; 3426 active_vlink_present = 0;
3293 3427
3294 vports = lpfc_create_vport_work_array(phba); 3428 vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3445,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3311 * re-instantiate the Vlink using FDISC. 3445 * re-instantiate the Vlink using FDISC.
3312 */ 3446 */
3313 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 3447 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3448 shost = lpfc_shost_from_vport(vport);
3314 spin_lock_irq(shost->host_lock); 3449 spin_lock_irq(shost->host_lock);
3315 ndlp->nlp_flag |= NLP_DELAY_TMO; 3450 ndlp->nlp_flag |= NLP_DELAY_TMO;
3316 spin_unlock_irq(shost->host_lock); 3451 spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3456,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
3321 * Otherwise, we request port to rediscover 3456 * Otherwise, we request port to rediscover
3322 * the entire FCF table for a fast recovery 3457 * the entire FCF table for a fast recovery
3323 * from possible case that the current FCF 3458 * from possible case that the current FCF
3324 * is no longer valid. 3459 * is no longer valid if we are not already
3460 * in the FCF failover process.
3325 */ 3461 */
3462 spin_lock_irq(&phba->hbalock);
3463 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3464 spin_unlock_irq(&phba->hbalock);
3465 break;
3466 }
3467 /* Mark the fast failover process in progress */
3468 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3469 spin_unlock_irq(&phba->hbalock);
3470 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3471 LOG_DISCOVERY,
3472 "2773 Start FCF fast failover due "
3473 "to CVL event: evt_tag:x%x\n",
3474 acqe_fcoe->event_tag);
3326 rc = lpfc_sli4_redisc_fcf_table(phba); 3475 rc = lpfc_sli4_redisc_fcf_table(phba);
3327 if (rc) 3476 if (rc) {
3477 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3478 LOG_DISCOVERY,
3479 "2774 Issue FCF rediscover "
3480 "mabilbox command failed, "
3481 "through to CVL event\n");
3482 spin_lock_irq(&phba->hbalock);
3483 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3484 spin_unlock_irq(&phba->hbalock);
3328 /* 3485 /*
3329 * Last resort will be re-try on the 3486 * Last resort will be re-try on the
3330 * the current registered FCF entry. 3487 * the current registered FCF entry.
3331 */ 3488 */
3332 lpfc_retry_pport_discovery(phba); 3489 lpfc_retry_pport_discovery(phba);
3490 }
3333 } 3491 }
3334 break; 3492 break;
3335 default: 3493 default:
@@ -3426,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3426 spin_unlock_irq(&phba->hbalock); 3584 spin_unlock_irq(&phba->hbalock);
3427 3585
3428 /* Scan FCF table from the first entry to re-discover SAN */ 3586 /* Scan FCF table from the first entry to re-discover SAN */
3429 rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); 3587 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3588 "2777 Start FCF table scan after FCF "
3589 "rediscovery quiescent period over\n");
3590 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3430 if (rc) 3591 if (rc)
3431 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 3592 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3432 "2747 Post FCF rediscovery read FCF record " 3593 "2747 Issue FCF scan read FCF mailbox "
3433 "failed 0x%x\n", rc); 3594 "command failed 0x%x\n", rc);
3434} 3595}
3435 3596
3436/** 3597/**
@@ -3722,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3722 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; 3883 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
3723 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 3884 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
3724 struct lpfc_mqe *mqe; 3885 struct lpfc_mqe *mqe;
3886 int longs;
3725 3887
3726 /* Before proceed, wait for POST done and device ready */ 3888 /* Before proceed, wait for POST done and device ready */
3727 rc = lpfc_sli4_post_status_check(phba); 3889 rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3898 goto out_free_active_sgl; 4060 goto out_free_active_sgl;
3899 } 4061 }
3900 4062
4063 /* Allocate eligible FCF bmask memory for FCF round robin failover */
4064 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4065 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4066 GFP_KERNEL);
4067 if (!phba->fcf.fcf_rr_bmask) {
4068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4069 "2759 Failed allocate memory for FCF round "
4070 "robin failover bmask\n");
4071 goto out_remove_rpi_hdrs;
4072 }
4073
3901 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4074 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
3902 phba->cfg_fcp_eq_count), GFP_KERNEL); 4075 phba->cfg_fcp_eq_count), GFP_KERNEL);
3903 if (!phba->sli4_hba.fcp_eq_hdl) { 4076 if (!phba->sli4_hba.fcp_eq_hdl) {
3904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4077 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3905 "2572 Failed allocate memory for fast-path " 4078 "2572 Failed allocate memory for fast-path "
3906 "per-EQ handle array\n"); 4079 "per-EQ handle array\n");
3907 goto out_remove_rpi_hdrs; 4080 goto out_free_fcf_rr_bmask;
3908 } 4081 }
3909 4082
3910 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4083 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3957,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
3957 4130
3958out_free_fcp_eq_hdl: 4131out_free_fcp_eq_hdl:
3959 kfree(phba->sli4_hba.fcp_eq_hdl); 4132 kfree(phba->sli4_hba.fcp_eq_hdl);
4133out_free_fcf_rr_bmask:
4134 kfree(phba->fcf.fcf_rr_bmask);
3960out_remove_rpi_hdrs: 4135out_remove_rpi_hdrs:
3961 lpfc_sli4_remove_rpi_hdrs(phba); 4136 lpfc_sli4_remove_rpi_hdrs(phba);
3962out_free_active_sgl: 4137out_free_active_sgl:
@@ -4002,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4002 lpfc_sli4_remove_rpi_hdrs(phba); 4177 lpfc_sli4_remove_rpi_hdrs(phba);
4003 lpfc_sli4_remove_rpis(phba); 4178 lpfc_sli4_remove_rpis(phba);
4004 4179
4180 /* Free eligible FCF index bmask */
4181 kfree(phba->fcf.fcf_rr_bmask);
4182
4005 /* Free the ELS sgl list */ 4183 /* Free the ELS sgl list */
4006 lpfc_free_active_sgl(phba); 4184 lpfc_free_active_sgl(phba);
4007 lpfc_free_sgl_list(phba); 4185 lpfc_free_sgl_list(phba);
@@ -4397,6 +4575,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
4397 4575
4398 /* The list order is used by later block SGL registraton */ 4576 /* The list order is used by later block SGL registraton */
4399 spin_lock_irq(&phba->hbalock); 4577 spin_lock_irq(&phba->hbalock);
4578 sglq_entry->state = SGL_FREED;
4400 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); 4579 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4401 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; 4580 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4402 phba->sli4_hba.total_sglq_bufs++; 4581 phba->sli4_hba.total_sglq_bufs++;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
35#define LOG_VPORT 0x00004000 /* NPIV events */ 35#define LOG_VPORT 0x00004000 /* NPIV events */
36#define LOF_SECURITY 0x00008000 /* Security events */ 36#define LOF_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */
38#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
39 40
40#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c4dce1a30ca..1e61ae3bc4eb 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1748} 1748}
1749 1749
1750/** 1750/**
1751 * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd 1751 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
1752 * @phba: pointer to lpfc hba data structure. 1752 * @phba: pointer to lpfc hba data structure.
1753 * @fcf_index: index to fcf table. 1753 * @fcf_index: index to fcf table.
1754 * 1754 *
@@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1759 * NULL. 1759 * NULL.
1760 **/ 1760 **/
1761int 1761int
1762lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, 1762lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
1763 struct lpfcMboxq *mboxq, 1763 struct lpfcMboxq *mboxq,
1764 uint16_t fcf_index) 1764 uint16_t fcf_index)
1765{ 1765{
1766 void *virt_addr; 1766 void *virt_addr;
1767 dma_addr_t phys_addr; 1767 dma_addr_t phys_addr;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 483fb74bc592..b16bb2c9978b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 620 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
621 struct lpfc_scsi_buf *psb, *next_psb; 621 struct lpfc_scsi_buf *psb, *next_psb;
622 unsigned long iflag = 0; 622 unsigned long iflag = 0;
623 struct lpfc_iocbq *iocbq;
624 int i;
623 625
624 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); 626 spin_lock_irqsave(&phba->hbalock, iflag);
627 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
625 list_for_each_entry_safe(psb, next_psb, 628 list_for_each_entry_safe(psb, next_psb,
626 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 629 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
627 if (psb->cur_iocbq.sli4_xritag == xri) { 630 if (psb->cur_iocbq.sli4_xritag == xri) {
628 list_del(&psb->list); 631 list_del(&psb->list);
629 psb->exch_busy = 0; 632 psb->exch_busy = 0;
630 psb->status = IOSTAT_SUCCESS; 633 psb->status = IOSTAT_SUCCESS;
631 spin_unlock_irqrestore( 634 spin_unlock(
632 &phba->sli4_hba.abts_scsi_buf_list_lock, 635 &phba->sli4_hba.abts_scsi_buf_list_lock);
633 iflag); 636 spin_unlock_irqrestore(&phba->hbalock, iflag);
634 lpfc_release_scsi_buf_s4(phba, psb); 637 lpfc_release_scsi_buf_s4(phba, psb);
635 return; 638 return;
636 } 639 }
637 } 640 }
638 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 641 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
639 iflag); 642 for (i = 1; i <= phba->sli.last_iotag; i++) {
643 iocbq = phba->sli.iocbq_lookup[i];
644
645 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
646 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
647 continue;
648 if (iocbq->sli4_xritag != xri)
649 continue;
650 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
651 psb->exch_busy = 0;
652 spin_unlock_irqrestore(&phba->hbalock, iflag);
653 return;
654
655 }
656 spin_unlock_irqrestore(&phba->hbalock, iflag);
640} 657}
641 658
642/** 659/**
@@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1006 struct scatterlist *sgel = NULL; 1023 struct scatterlist *sgel = NULL;
1007 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1024 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1008 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1025 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1026 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1009 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1027 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1010 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1028 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1011 dma_addr_t physaddr; 1029 dma_addr_t physaddr;
@@ -1056,6 +1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1056 physaddr = sg_dma_address(sgel); 1074 physaddr = sg_dma_address(sgel);
1057 if (phba->sli_rev == 3 && 1075 if (phba->sli_rev == 3 &&
1058 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1076 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1077 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1059 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1078 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1060 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1079 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1061 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1080 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1082 * explicitly reinitialized since all iocb memory resources are reused. 1101 * explicitly reinitialized since all iocb memory resources are reused.
1083 */ 1102 */
1084 if (phba->sli_rev == 3 && 1103 if (phba->sli_rev == 3 &&
1085 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 1104 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1105 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1086 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1106 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1087 /* 1107 /*
1088 * The extended IOCB format can only fit 3 BDE or a BPL. 1108 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1107 } else { 1127 } else {
1108 iocb_cmd->un.fcpi64.bdl.bdeSize = 1128 iocb_cmd->un.fcpi64.bdl.bdeSize =
1109 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1129 ((num_bde + 2) * sizeof(struct ulp_bde64));
1130 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1110 } 1131 }
1111 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1132 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1112 1133
@@ -2079,8 +2100,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2079 2100
2080 if (resp_info & RSP_LEN_VALID) { 2101 if (resp_info & RSP_LEN_VALID) {
2081 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2102 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2082 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || 2103 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2083 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2084 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2104 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2085 "2719 Invalid response length: " 2105 "2719 Invalid response length: "
2086 "tgt x%x lun x%x cmnd x%x rsplen x%x\n", 2106 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
@@ -2090,6 +2110,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2090 host_status = DID_ERROR; 2110 host_status = DID_ERROR;
2091 goto out; 2111 goto out;
2092 } 2112 }
2113 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2114 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2115 "2757 Protocol failure detected during "
2116 "processing of FCP I/O op: "
2117 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2118 cmnd->device->id,
2119 cmnd->device->lun, cmnd->cmnd[0],
2120 fcprsp->rspInfo3);
2121 host_status = DID_ERROR;
2122 goto out;
2123 }
2093 } 2124 }
2094 2125
2095 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2126 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96d4e07..fe6660ca6452 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
494 * 494 *
495 * Returns sglq ponter = success, NULL = Failure. 495 * Returns sglq ponter = success, NULL = Failure.
496 **/ 496 **/
497static struct lpfc_sglq * 497struct lpfc_sglq *
498__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 498__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
499{ 499{
500 uint16_t adj_xri; 500 uint16_t adj_xri;
@@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
526 return NULL; 526 return NULL;
527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; 527 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; 528 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
529 sglq->state = SGL_ALLOCATED;
529 return sglq; 530 return sglq;
530} 531}
531 532
@@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
580 else 581 else
581 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); 582 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
582 if (sglq) { 583 if (sglq) {
583 if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { 584 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
585 (sglq->state != SGL_XRI_ABORTED)) {
584 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 586 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
585 iflag); 587 iflag);
586 list_add(&sglq->list, 588 list_add(&sglq->list,
587 &phba->sli4_hba.lpfc_abts_els_sgl_list); 589 &phba->sli4_hba.lpfc_abts_els_sgl_list);
588 spin_unlock_irqrestore( 590 spin_unlock_irqrestore(
589 &phba->sli4_hba.abts_sgl_list_lock, iflag); 591 &phba->sli4_hba.abts_sgl_list_lock, iflag);
590 } else 592 } else {
593 sglq->state = SGL_FREED;
591 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); 594 list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
595 }
592 } 596 }
593 597
594 598
@@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2258 spin_unlock_irqrestore(&phba->hbalock, 2262 spin_unlock_irqrestore(&phba->hbalock,
2259 iflag); 2263 iflag);
2260 } 2264 }
2261 if ((phba->sli_rev == LPFC_SLI_REV4) && 2265 if (phba->sli_rev == LPFC_SLI_REV4) {
2262 (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { 2266 if (saveq->iocb_flag &
2263 /* Set cmdiocb flag for the exchange 2267 LPFC_EXCHANGE_BUSY) {
2264 * busy so sgl (xri) will not be 2268 /* Set cmdiocb flag for the
2265 * released until the abort xri is 2269 * exchange busy so sgl (xri)
2266 * received from hba, clear the 2270 * will not be released until
2267 * LPFC_DRIVER_ABORTED bit in case 2271 * the abort xri is received
2268 * it was driver initiated abort. 2272 * from hba.
2269 */ 2273 */
2270 spin_lock_irqsave(&phba->hbalock, 2274 spin_lock_irqsave(
2271 iflag); 2275 &phba->hbalock, iflag);
2272 cmdiocbp->iocb_flag &= 2276 cmdiocbp->iocb_flag |=
2273 ~LPFC_DRIVER_ABORTED; 2277 LPFC_EXCHANGE_BUSY;
2274 cmdiocbp->iocb_flag |= 2278 spin_unlock_irqrestore(
2275 LPFC_EXCHANGE_BUSY; 2279 &phba->hbalock, iflag);
2276 spin_unlock_irqrestore(&phba->hbalock, 2280 }
2277 iflag); 2281 if (cmdiocbp->iocb_flag &
2278 cmdiocbp->iocb.ulpStatus = 2282 LPFC_DRIVER_ABORTED) {
2279 IOSTAT_LOCAL_REJECT; 2283 /*
2280 cmdiocbp->iocb.un.ulpWord[4] = 2284 * Clear LPFC_DRIVER_ABORTED
2281 IOERR_ABORT_REQUESTED; 2285 * bit in case it was driver
2282 /* 2286 * initiated abort.
2283 * For SLI4, irsiocb contains NO_XRI 2287 */
2284 * in sli_xritag, it shall not affect 2288 spin_lock_irqsave(
2285 * releasing sgl (xri) process. 2289 &phba->hbalock, iflag);
2286 */ 2290 cmdiocbp->iocb_flag &=
2287 saveq->iocb.ulpStatus = 2291 ~LPFC_DRIVER_ABORTED;
2288 IOSTAT_LOCAL_REJECT; 2292 spin_unlock_irqrestore(
2289 saveq->iocb.un.ulpWord[4] = 2293 &phba->hbalock, iflag);
2290 IOERR_SLI_ABORTED; 2294 cmdiocbp->iocb.ulpStatus =
2291 spin_lock_irqsave(&phba->hbalock, 2295 IOSTAT_LOCAL_REJECT;
2292 iflag); 2296 cmdiocbp->iocb.un.ulpWord[4] =
2293 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2297 IOERR_ABORT_REQUESTED;
2294 spin_unlock_irqrestore(&phba->hbalock, 2298 /*
2295 iflag); 2299 * For SLI4, irsiocb contains
2300 * NO_XRI in sli_xritag, it
2301 * shall not affect releasing
2302 * sgl (xri) process.
2303 */
2304 saveq->iocb.ulpStatus =
2305 IOSTAT_LOCAL_REJECT;
2306 saveq->iocb.un.ulpWord[4] =
2307 IOERR_SLI_ABORTED;
2308 spin_lock_irqsave(
2309 &phba->hbalock, iflag);
2310 saveq->iocb_flag |=
2311 LPFC_DELAY_MEM_FREE;
2312 spin_unlock_irqrestore(
2313 &phba->hbalock, iflag);
2314 }
2296 } 2315 }
2297 } 2316 }
2298 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2317 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2515 2534
2516 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2535 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2517 &rspiocbq); 2536 &rspiocbq);
2518 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 2537 if (unlikely(!cmdiocbq))
2519 spin_unlock_irqrestore(&phba->hbalock, 2538 break;
2520 iflag); 2539 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2521 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2540 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2522 &rspiocbq); 2541 if (cmdiocbq->iocb_cmpl) {
2523 spin_lock_irqsave(&phba->hbalock, 2542 spin_unlock_irqrestore(&phba->hbalock, iflag);
2524 iflag); 2543 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2525 } 2544 &rspiocbq);
2545 spin_lock_irqsave(&phba->hbalock, iflag);
2546 }
2526 break; 2547 break;
2527 case LPFC_UNSOL_IOCB: 2548 case LPFC_UNSOL_IOCB:
2528 spin_unlock_irqrestore(&phba->hbalock, iflag); 2549 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3091,6 +3112,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3091 3112
3092 /* Check to see if any errors occurred during init */ 3113 /* Check to see if any errors occurred during init */
3093 if ((status & HS_FFERM) || (i >= 20)) { 3114 if ((status & HS_FFERM) || (i >= 20)) {
3115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3116 "2751 Adapter failed to restart, "
3117 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3118 status,
3119 readl(phba->MBslimaddr + 0xa8),
3120 readl(phba->MBslimaddr + 0xac));
3094 phba->link_state = LPFC_HBA_ERROR; 3121 phba->link_state = LPFC_HBA_ERROR;
3095 retval = 1; 3122 retval = 1;
3096 } 3123 }
@@ -3278,6 +3305,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
3278 if (retval != MBX_SUCCESS) { 3305 if (retval != MBX_SUCCESS) {
3279 if (retval != MBX_BUSY) 3306 if (retval != MBX_BUSY)
3280 mempool_free(pmb, phba->mbox_mem_pool); 3307 mempool_free(pmb, phba->mbox_mem_pool);
3308 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3309 "2752 KILL_BOARD command failed retval %d\n",
3310 retval);
3281 spin_lock_irq(&phba->hbalock); 3311 spin_lock_irq(&phba->hbalock);
3282 phba->link_flag &= ~LS_IGNORE_ERATT; 3312 phba->link_flag &= ~LS_IGNORE_ERATT;
3283 spin_unlock_irq(&phba->hbalock); 3313 spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4065,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
4035 4065
4036lpfc_sli_hba_setup_error: 4066lpfc_sli_hba_setup_error:
4037 phba->link_state = LPFC_HBA_ERROR; 4067 phba->link_state = LPFC_HBA_ERROR;
4038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4039 "0445 Firmware initialization failed\n"); 4069 "0445 Firmware initialization failed\n");
4040 return rc; 4070 return rc;
4041} 4071}
@@ -4388,7 +4418,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4388 spin_unlock_irq(&phba->hbalock); 4418 spin_unlock_irq(&phba->hbalock);
4389 4419
4390 /* Read the port's service parameters. */ 4420 /* Read the port's service parameters. */
4391 lpfc_read_sparam(phba, mboxq, vport->vpi); 4421 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
4422 if (rc) {
4423 phba->link_state = LPFC_HBA_ERROR;
4424 rc = -ENOMEM;
4425 goto out_free_vpd;
4426 }
4427
4392 mboxq->vport = vport; 4428 mboxq->vport = vport;
4393 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4429 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4394 mp = (struct lpfc_dmabuf *) mboxq->context1; 4430 mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4483,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4483 /* Post receive buffers to the device */ 4519 /* Post receive buffers to the device */
4484 lpfc_sli4_rb_setup(phba); 4520 lpfc_sli4_rb_setup(phba);
4485 4521
4522 /* Reset HBA FCF states after HBA reset */
4523 phba->fcf.fcf_flag = 0;
4524 phba->fcf.current_rec.flag = 0;
4525
4486 /* Start the ELS watchdog timer */ 4526 /* Start the ELS watchdog timer */
4487 mod_timer(&vport->els_tmofunc, 4527 mod_timer(&vport->els_tmofunc,
4488 jiffies + HZ * (phba->fc_ratov * 2)); 4528 jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7476,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7436{ 7476{
7437 wait_queue_head_t *pdone_q; 7477 wait_queue_head_t *pdone_q;
7438 unsigned long iflags; 7478 unsigned long iflags;
7479 struct lpfc_scsi_buf *lpfc_cmd;
7439 7480
7440 spin_lock_irqsave(&phba->hbalock, iflags); 7481 spin_lock_irqsave(&phba->hbalock, iflags);
7441 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 7482 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7484,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7443 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 7484 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7444 &rspiocbq->iocb, sizeof(IOCB_t)); 7485 &rspiocbq->iocb, sizeof(IOCB_t));
7445 7486
7487 /* Set the exchange busy flag for task management commands */
7488 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
7489 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
7490 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
7491 cur_iocbq);
7492 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
7493 }
7494
7446 pdone_q = cmdiocbq->context_un.wait_queue; 7495 pdone_q = cmdiocbq->context_un.wait_queue;
7447 if (pdone_q) 7496 if (pdone_q)
7448 wake_up(pdone_q); 7497 wake_up(pdone_q);
@@ -9061,6 +9110,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
9061 /* Fake the irspiocb and copy necessary response information */ 9110 /* Fake the irspiocb and copy necessary response information */
9062 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9111 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
9063 9112
9113 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
9114 spin_lock_irqsave(&phba->hbalock, iflags);
9115 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
9116 spin_unlock_irqrestore(&phba->hbalock, iflags);
9117 }
9118
9064 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9119 /* Pass the cmd_iocb and the rsp state to the upper layer */
9065 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9120 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
9066} 9121}
@@ -11941,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11941} 11996}
11942 11997
11943/** 11998/**
11944 * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. 11999 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
11945 * @phba: pointer to lpfc hba data structure. 12000 * @phba: pointer to lpfc hba data structure.
11946 * @fcf_index: FCF table entry offset. 12001 * @fcf_index: FCF table entry offset.
11947 * 12002 *
11948 * This routine is invoked to read up to @fcf_num of FCF record from the 12003 * This routine is invoked to scan the entire FCF table by reading FCF
11949 * device starting with the given @fcf_index. 12004 * record and processing it one at a time starting from the @fcf_index
12005 * for initial FCF discovery or fast FCF failover rediscovery.
12006 *
12007 * Return 0 if the mailbox command is submitted sucessfully, none 0
12008 * otherwise.
11950 **/ 12009 **/
11951int 12010int
11952lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) 12011lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
11953{ 12012{
11954 int rc = 0, error; 12013 int rc = 0, error;
11955 LPFC_MBOXQ_t *mboxq; 12014 LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11961 "2000 Failed to allocate mbox for " 12020 "2000 Failed to allocate mbox for "
11962 "READ_FCF cmd\n"); 12021 "READ_FCF cmd\n");
11963 error = -ENOMEM; 12022 error = -ENOMEM;
11964 goto fail_fcfscan; 12023 goto fail_fcf_scan;
11965 } 12024 }
11966 /* Construct the read FCF record mailbox command */ 12025 /* Construct the read FCF record mailbox command */
11967 rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); 12026 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
11968 if (rc) { 12027 if (rc) {
11969 error = -EINVAL; 12028 error = -EINVAL;
11970 goto fail_fcfscan; 12029 goto fail_fcf_scan;
11971 } 12030 }
11972 /* Issue the mailbox command asynchronously */ 12031 /* Issue the mailbox command asynchronously */
11973 mboxq->vport = phba->pport; 12032 mboxq->vport = phba->pport;
11974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; 12033 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
11975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12034 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11976 if (rc == MBX_NOT_FINISHED) 12035 if (rc == MBX_NOT_FINISHED)
11977 error = -EIO; 12036 error = -EIO;
@@ -11979,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11979 spin_lock_irq(&phba->hbalock); 12038 spin_lock_irq(&phba->hbalock);
11980 phba->hba_flag |= FCF_DISC_INPROGRESS; 12039 phba->hba_flag |= FCF_DISC_INPROGRESS;
11981 spin_unlock_irq(&phba->hbalock); 12040 spin_unlock_irq(&phba->hbalock);
12041 /* Reset FCF round robin index bmask for new scan */
12042 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
12043 memset(phba->fcf.fcf_rr_bmask, 0,
12044 sizeof(*phba->fcf.fcf_rr_bmask));
11982 error = 0; 12045 error = 0;
11983 } 12046 }
11984fail_fcfscan: 12047fail_fcf_scan:
11985 if (error) { 12048 if (error) {
11986 if (mboxq) 12049 if (mboxq)
11987 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12050 lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12057,181 @@ fail_fcfscan:
11994} 12057}
11995 12058
11996/** 12059/**
12060 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
12061 * @phba: pointer to lpfc hba data structure.
12062 * @fcf_index: FCF table entry offset.
12063 *
12064 * This routine is invoked to read an FCF record indicated by @fcf_index
12065 * and to use it for FLOGI round robin FCF failover.
12066 *
12067 * Return 0 if the mailbox command is submitted sucessfully, none 0
12068 * otherwise.
12069 **/
12070int
12071lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12072{
12073 int rc = 0, error;
12074 LPFC_MBOXQ_t *mboxq;
12075
12076 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12077 if (!mboxq) {
12078 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12079 "2763 Failed to allocate mbox for "
12080 "READ_FCF cmd\n");
12081 error = -ENOMEM;
12082 goto fail_fcf_read;
12083 }
12084 /* Construct the read FCF record mailbox command */
12085 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12086 if (rc) {
12087 error = -EINVAL;
12088 goto fail_fcf_read;
12089 }
12090 /* Issue the mailbox command asynchronously */
12091 mboxq->vport = phba->pport;
12092 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
12093 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12094 if (rc == MBX_NOT_FINISHED)
12095 error = -EIO;
12096 else
12097 error = 0;
12098
12099fail_fcf_read:
12100 if (error && mboxq)
12101 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12102 return error;
12103}
12104
12105/**
12106 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
12107 * @phba: pointer to lpfc hba data structure.
12108 * @fcf_index: FCF table entry offset.
12109 *
12110 * This routine is invoked to read an FCF record indicated by @fcf_index to
12111 * determine whether it's eligible for FLOGI round robin failover list.
12112 *
12113 * Return 0 if the mailbox command is submitted sucessfully, none 0
12114 * otherwise.
12115 **/
12116int
12117lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
12118{
12119 int rc = 0, error;
12120 LPFC_MBOXQ_t *mboxq;
12121
12122 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12123 if (!mboxq) {
12124 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
12125 "2758 Failed to allocate mbox for "
12126 "READ_FCF cmd\n");
12127 error = -ENOMEM;
12128 goto fail_fcf_read;
12129 }
12130 /* Construct the read FCF record mailbox command */
12131 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
12132 if (rc) {
12133 error = -EINVAL;
12134 goto fail_fcf_read;
12135 }
12136 /* Issue the mailbox command asynchronously */
12137 mboxq->vport = phba->pport;
12138 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
12139 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
12140 if (rc == MBX_NOT_FINISHED)
12141 error = -EIO;
12142 else
12143 error = 0;
12144
12145fail_fcf_read:
12146 if (error && mboxq)
12147 lpfc_sli4_mbox_cmd_free(phba, mboxq);
12148 return error;
12149}
12150
12151/**
12152 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
12153 * @phba: pointer to lpfc hba data structure.
12154 *
12155 * This routine is to get the next eligible FCF record index in a round
12156 * robin fashion. If the next eligible FCF record index equals to the
12157 * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
12158 * shall be returned, otherwise, the next eligible FCF record's index
12159 * shall be returned.
12160 **/
12161uint16_t
12162lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
12163{
12164 uint16_t next_fcf_index;
12165
12166 /* Search from the currently registered FCF index */
12167 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12168 LPFC_SLI4_FCF_TBL_INDX_MAX,
12169 phba->fcf.current_rec.fcf_indx);
12170 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
12171 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
12172 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
12173 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
12174 /* Round robin failover stop condition */
12175 if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
12176 return LPFC_FCOE_FCF_NEXT_NONE;
12177
12178 return next_fcf_index;
12179}
12180
12181/**
12182 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
12183 * @phba: pointer to lpfc hba data structure.
12184 *
12185 * This routine sets the FCF record index in to the eligible bmask for
12186 * round robin failover search. It checks to make sure that the index
12187 * does not go beyond the range of the driver allocated bmask dimension
12188 * before setting the bit.
12189 *
12190 * Returns 0 if the index bit successfully set, otherwise, it returns
12191 * -EINVAL.
12192 **/
12193int
12194lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
12195{
12196 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12197 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12198 "2610 HBA FCF index reached driver's "
12199 "book keeping dimension: fcf_index:%d, "
12200 "driver_bmask_max:%d\n",
12201 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12202 return -EINVAL;
12203 }
12204 /* Set the eligible FCF record index bmask */
12205 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12206
12207 return 0;
12208}
12209
12210/**
12211 * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index
12212 * @phba: pointer to lpfc hba data structure.
12213 *
12214 * This routine clears the FCF record index from the eligible bmask for
12215 * round robin failover search. It checks to make sure that the index
12216 * does not go beyond the range of the driver allocated bmask dimension
12217 * before clearing the bit.
12218 **/
12219void
12220lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
12221{
12222 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
12223 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12224 "2762 HBA FCF index goes beyond driver's "
12225 "book keeping dimension: fcf_index:%d, "
12226 "driver_bmask_max:%d\n",
12227 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
12228 return;
12229 }
12230 /* Clear the eligible FCF record index bmask */
12231 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
12232}
12233
12234/**
11997 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 12235 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
11998 * @phba: pointer to lpfc hba data structure. 12236 * @phba: pointer to lpfc hba data structure.
11999 * 12237 *
@@ -12014,21 +12252,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 12252 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
12015 &redisc_fcf->header.cfg_shdr.response); 12253 &redisc_fcf->header.cfg_shdr.response);
12016 if (shdr_status || shdr_add_status) { 12254 if (shdr_status || shdr_add_status) {
12017 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12255 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
12018 "2746 Requesting for FCF rediscovery failed " 12256 "2746 Requesting for FCF rediscovery failed "
12019 "status x%x add_status x%x\n", 12257 "status x%x add_status x%x\n",
12020 shdr_status, shdr_add_status); 12258 shdr_status, shdr_add_status);
12021 /* 12259 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
12022 * Request failed, last resort to re-try current 12260 spin_lock_irq(&phba->hbalock);
12023 * registered FCF entry 12261 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
12024 */ 12262 spin_unlock_irq(&phba->hbalock);
12025 lpfc_retry_pport_discovery(phba); 12263 /*
12026 } else 12264 * CVL event triggered FCF rediscover request failed,
12265 * last resort to re-try current registered FCF entry.
12266 */
12267 lpfc_retry_pport_discovery(phba);
12268 } else {
12269 spin_lock_irq(&phba->hbalock);
12270 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
12271 spin_unlock_irq(&phba->hbalock);
12272 /*
12273 * DEAD FCF event triggered FCF rediscover request
12274 * failed, last resort to fail over as a link down
12275 * to FCF registration.
12276 */
12277 lpfc_sli4_fcf_dead_failthrough(phba);
12278 }
12279 } else {
12280 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
12281 "2775 Start FCF rediscovery quiescent period "
12282 "wait timer before scaning FCF table\n");
12027 /* 12283 /*
12028 * Start FCF rediscovery wait timer for pending FCF 12284 * Start FCF rediscovery wait timer for pending FCF
12029 * before rescan FCF record table. 12285 * before rescan FCF record table.
12030 */ 12286 */
12031 lpfc_fcf_redisc_wait_start_timer(phba); 12287 lpfc_fcf_redisc_wait_start_timer(phba);
12288 }
12032 12289
12033 mempool_free(mbox, phba->mbox_mem_pool); 12290 mempool_free(mbox, phba->mbox_mem_pool);
12034} 12291}
@@ -12047,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12047 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 12304 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
12048 int rc, length; 12305 int rc, length;
12049 12306
12307 /* Cancel retry delay timers to all vports before FCF rediscover */
12308 lpfc_cancel_all_vport_retry_delay_timer(phba);
12309
12050 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12310 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12051 if (!mbox) { 12311 if (!mbox) {
12052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12338,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
12078} 12338}
12079 12339
12080/** 12340/**
12341 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
12342 * @phba: pointer to lpfc hba data structure.
12343 *
12344 * This function is the failover routine as a last resort to the FCF DEAD
12345 * event when driver failed to perform fast FCF failover.
12346 **/
12347void
12348lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
12349{
12350 uint32_t link_state;
12351
12352 /*
12353 * Last resort as FCF DEAD event failover will treat this as
12354 * a link down, but save the link state because we don't want
12355 * it to be changed to Link Down unless it is already down.
12356 */
12357 link_state = phba->link_state;
12358 lpfc_linkdown(phba);
12359 phba->link_state = link_state;
12360
12361 /* Unregister FCF if no devices connected to it */
12362 lpfc_unregister_unused_fcf(phba);
12363}
12364
12365/**
12081 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 12366 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
12082 * @phba: pointer to lpfc hba data structure. 12367 * @phba: pointer to lpfc hba data structure.
12083 * 12368 *
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf5437d1f5..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -62,6 +62,7 @@ struct lpfc_iocbq {
62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 62#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ 63#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ 64#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
65#define DSS_SECURITY_OP 0x100 /* security IO */
65 66
66#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 67#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
67#define LPFC_FIP_ELS_ID_SHIFT 14 68#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836600f..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,27 @@ struct lpfc_fcf {
153#define FCF_REGISTERED 0x02 /* FCF registered with FW */ 153#define FCF_REGISTERED 0x02 /* FCF registered with FW */
154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */ 154#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
155#define FCF_IN_USE 0x08 /* Atleast one discovery completed */ 155#define FCF_IN_USE 0x08 /* Atleast one discovery completed */
156#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ 156#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
157#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ 157#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
158#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ 158#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
159#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
160#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
161#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
162#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
159 uint32_t addr_mode; 163 uint32_t addr_mode;
164 uint16_t fcf_rr_init_indx;
160 struct lpfc_fcf_rec current_rec; 165 struct lpfc_fcf_rec current_rec;
161 struct lpfc_fcf_rec failover_rec; 166 struct lpfc_fcf_rec failover_rec;
162 struct timer_list redisc_wait; 167 struct timer_list redisc_wait;
168 unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
163}; 169};
164 170
171/*
172 * Maximum FCF table index, it is for driver internal book keeping, it
173 * just needs to be no less than the supported HBA's FCF table size.
174 */
175#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
176
165#define LPFC_REGION23_SIGNATURE "RG23" 177#define LPFC_REGION23_SIGNATURE "RG23"
166#define LPFC_REGION23_VERSION 1 178#define LPFC_REGION23_VERSION 1
167#define LPFC_REGION23_LAST_REC 0xff 179#define LPFC_REGION23_LAST_REC 0xff
@@ -431,11 +443,18 @@ enum lpfc_sge_type {
431 SCSI_BUFF_TYPE 443 SCSI_BUFF_TYPE
432}; 444};
433 445
446enum lpfc_sgl_state {
447 SGL_FREED,
448 SGL_ALLOCATED,
449 SGL_XRI_ABORTED
450};
451
434struct lpfc_sglq { 452struct lpfc_sglq {
435 /* lpfc_sglqs are used in double linked lists */ 453 /* lpfc_sglqs are used in double linked lists */
436 struct list_head list; 454 struct list_head list;
437 struct list_head clist; 455 struct list_head clist;
438 enum lpfc_sge_type buff_type; /* is this a scsi sgl */ 456 enum lpfc_sge_type buff_type; /* is this a scsi sgl */
457 enum lpfc_sgl_state state;
439 uint16_t iotag; /* pre-assigned IO tag */ 458 uint16_t iotag; /* pre-assigned IO tag */
440 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ 459 uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
441 struct sli4_sge *sgl; /* pre-assigned SGL */ 460 struct sli4_sge *sgl; /* pre-assigned SGL */
@@ -463,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
463void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); 482void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
464void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, 483void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
465 struct lpfc_mbx_sge *); 484 struct lpfc_mbx_sge *);
466int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, 485int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
467 uint16_t); 486 uint16_t);
468 487
469void lpfc_sli4_hba_reset(struct lpfc_hba *); 488void lpfc_sli4_hba_reset(struct lpfc_hba *);
470struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, 489struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -523,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
523uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); 542uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
524uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); 543uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
525void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); 544void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
526int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); 545int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
527void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); 546int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
547int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
548void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
549void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
550void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
551int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
528int lpfc_sli4_post_status_check(struct lpfc_hba *); 552int lpfc_sli4_post_status_check(struct lpfc_hba *);
529uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 553uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
530 554
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa46fba..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.9" 21#define LPFC_DRIVER_VERSION "8.3.10"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e873102a..869f76cbc58a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -123,7 +123,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
123 } 123 }
124 mb = &pmb->u.mb; 124 mb = &pmb->u.mb;
125 125
126 lpfc_read_sparam(phba, pmb, vport->vpi); 126 rc = lpfc_read_sparam(phba, pmb, vport->vpi);
127 if (rc) {
128 mempool_free(pmb, phba->mbox_mem_pool);
129 return -ENOMEM;
130 }
131
127 /* 132 /*
128 * Grab buffer pointer and clear context1 so we can use 133 * Grab buffer pointer and clear context1 so we can use
129 * lpfc_sli_issue_box_wait 134 * lpfc_sli_issue_box_wait
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 24223473f573..60de85091502 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1433,6 +1433,10 @@ int osd_finalize_request(struct osd_request *or,
1433 cdbh->command_specific_options |= or->attributes_mode; 1433 cdbh->command_specific_options |= or->attributes_mode;
1434 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) { 1434 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1435 ret = _osd_req_finalize_attr_page(or); 1435 ret = _osd_req_finalize_attr_page(or);
1436 if (ret) {
1437 OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1438 return ret;
1439 }
1436 } else { 1440 } else {
1437 /* TODO: I think that for the GET_ATTR command these 2 should 1441 /* TODO: I think that for the GET_ATTR command these 2 should
1438 * be reversed to keep them in execution order (for embeded 1442 * be reversed to keep them in execution order (for embeded
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index c2341af587a3..021246454872 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1717,6 +1717,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
1717 cfg_mem->data = data; 1717 cfg_mem->data = data;
1718 1718
1719 ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem); 1719 ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem);
1720 if (ret)
1720 goto cs_failed; 1721 goto cs_failed;
1721 1722
1722 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1723 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index bd88349b8526..2c146b44d95f 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,6 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
63 * emulated RAID devices, so start with SCSI */ 63 * emulated RAID devices, so start with SCSI */
64 struct raid_internal *i = ac_to_raid_internal(cont); 64 struct raid_internal *i = ac_to_raid_internal(cont);
65 65
66#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
66 if (scsi_is_sdev_device(dev)) { 67 if (scsi_is_sdev_device(dev)) {
67 struct scsi_device *sdev = to_scsi_device(dev); 68 struct scsi_device *sdev = to_scsi_device(dev);
68 69
@@ -71,6 +72,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
71 72
72 return i->f->is_raid(dev); 73 return i->f->is_raid(dev);
73 } 74 }
75#endif
74 /* FIXME: look at other subsystems too */ 76 /* FIXME: look at other subsystems too */
75 return 0; 77 return 0;
76} 78}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 79660ee3e211..1d5b72173dd8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1232,6 +1232,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1232{ 1232{
1233 struct fc_vport *vport = transport_class_to_vport(dev); 1233 struct fc_vport *vport = transport_class_to_vport(dev);
1234 struct Scsi_Host *shost = vport_to_shost(vport); 1234 struct Scsi_Host *shost = vport_to_shost(vport);
1235 unsigned long flags;
1236
1237 spin_lock_irqsave(shost->host_lock, flags);
1238 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1239 spin_unlock_irqrestore(shost->host_lock, flags);
1240 return -EBUSY;
1241 }
1242 vport->flags |= FC_VPORT_DELETING;
1243 spin_unlock_irqrestore(shost->host_lock, flags);
1235 1244
1236 fc_queue_work(shost, &vport->vport_delete_work); 1245 fc_queue_work(shost, &vport->vport_delete_work);
1237 return count; 1246 return count;
@@ -1821,6 +1830,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1821 list_for_each_entry(vport, &fc_host->vports, peers) { 1830 list_for_each_entry(vport, &fc_host->vports, peers) {
1822 if ((vport->channel == 0) && 1831 if ((vport->channel == 0) &&
1823 (vport->port_name == wwpn) && (vport->node_name == wwnn)) { 1832 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1833 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1834 break;
1835 vport->flags |= FC_VPORT_DELETING;
1824 match = 1; 1836 match = 1;
1825 break; 1837 break;
1826 } 1838 }
@@ -3370,18 +3382,6 @@ fc_vport_terminate(struct fc_vport *vport)
3370 unsigned long flags; 3382 unsigned long flags;
3371 int stat; 3383 int stat;
3372 3384
3373 spin_lock_irqsave(shost->host_lock, flags);
3374 if (vport->flags & FC_VPORT_CREATING) {
3375 spin_unlock_irqrestore(shost->host_lock, flags);
3376 return -EBUSY;
3377 }
3378 if (vport->flags & (FC_VPORT_DEL)) {
3379 spin_unlock_irqrestore(shost->host_lock, flags);
3380 return -EALREADY;
3381 }
3382 vport->flags |= FC_VPORT_DELETING;
3383 spin_unlock_irqrestore(shost->host_lock, flags);
3384
3385 if (i->f->vport_delete) 3385 if (i->f->vport_delete)
3386 stat = i->f->vport_delete(vport); 3386 stat = i->f->vport_delete(vport);
3387 else 3387 else
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 83881dfb33c0..7b75c8a2a49d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1948,7 +1948,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1948{ 1948{
1949 struct request_queue *q = sdkp->disk->queue; 1949 struct request_queue *q = sdkp->disk->queue;
1950 unsigned int sector_sz = sdkp->device->sector_size; 1950 unsigned int sector_sz = sdkp->device->sector_size;
1951 const int vpd_len = 32; 1951 const int vpd_len = 64;
1952 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); 1952 unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
1953 1953
1954 if (!buffer || 1954 if (!buffer ||
@@ -1998,7 +1998,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
1998{ 1998{
1999 unsigned char *buffer; 1999 unsigned char *buffer;
2000 u16 rot; 2000 u16 rot;
2001 const int vpd_len = 32; 2001 const int vpd_len = 64;
2002 2002
2003 buffer = kmalloc(vpd_len, GFP_KERNEL); 2003 buffer = kmalloc(vpd_len, GFP_KERNEL);
2004 2004
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index a9802e76b5fa..722eac18f382 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -61,7 +61,7 @@ void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
61 void __iomem *pram; 61 void __iomem *pram;
62 unsigned long offset; 62 unsigned long offset;
63 struct resource res; 63 struct resource res;
64 unsigned long len; 64 resource_size_t len;
65 65
66 /* Don't remap parameter RAM if it has already been initialized 66 /* Don't remap parameter RAM if it has already been initialized
67 * during console setup. 67 * during console setup.
@@ -74,7 +74,7 @@ void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
74 if (of_address_to_resource(np, 1, &res)) 74 if (of_address_to_resource(np, 1, &res))
75 return NULL; 75 return NULL;
76 76
77 len = 1 + res.end - res.start; 77 len = resource_size(&res);
78 pram = ioremap(res.start, len); 78 pram = ioremap(res.start, len);
79 if (!pram) 79 if (!pram)
80 return NULL; 80 return NULL;
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index e91db4b38012..175d202ab37e 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -745,6 +745,7 @@ static struct pcmcia_device_id serial_ids[] = {
745 PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), 745 PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
746 PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), 746 PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
747 PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), 747 PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
748 PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
748 PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), 749 PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
749 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), 750 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
750 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), 751 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 980f39449ee5..f7b9aff88f4a 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -50,7 +50,6 @@
50#include <linux/list.h> 50#include <linux/list.h>
51#include <linux/dmaengine.h> 51#include <linux/dmaengine.h>
52#include <linux/scatterlist.h> 52#include <linux/scatterlist.h>
53#include <linux/timer.h>
54 53
55#ifdef CONFIG_SUPERH 54#ifdef CONFIG_SUPERH
56#include <asm/sh_bios.h> 55#include <asm/sh_bios.h>
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d514e28d0755..d2e0321049e2 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -474,7 +474,7 @@ static void sunsab_stop_rx(struct uart_port *port)
474{ 474{
475 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; 475 struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
476 476
477 up->interrupt_mask0 |= SAB82532_ISR0_TCD; 477 up->interrupt_mask0 |= SAB82532_IMR0_TCD;
478 writeb(up->interrupt_mask1, &up->regs->w.imr0); 478 writeb(up->interrupt_mask1, &up->regs->w.imr0);
479} 479}
480 480
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index ab2ab3c81834..f0a6c61b17f7 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -19,7 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#if defined(CONFIG_OF) 22#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/of_device.h> 24#include <linux/of_device.h>
25#include <linux/of_platform.h> 25#include <linux/of_platform.h>
@@ -581,7 +581,7 @@ static struct platform_driver ulite_platform_driver = {
581/* --------------------------------------------------------------------- 581/* ---------------------------------------------------------------------
582 * OF bus bindings 582 * OF bus bindings
583 */ 583 */
584#if defined(CONFIG_OF) 584#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
585static int __devinit 585static int __devinit
586ulite_of_probe(struct of_device *op, const struct of_device_id *match) 586ulite_of_probe(struct of_device *op, const struct of_device_id *match)
587{ 587{
@@ -631,11 +631,11 @@ static inline void __exit ulite_of_unregister(void)
631{ 631{
632 of_unregister_platform_driver(&ulite_of_driver); 632 of_unregister_platform_driver(&ulite_of_driver);
633} 633}
634#else /* CONFIG_OF */ 634#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
635/* CONFIG_OF not enabled; do nothing helpers */ 635/* Appropriate config not enabled; do nothing helpers */
636static inline int __init ulite_of_register(void) { return 0; } 636static inline int __init ulite_of_register(void) { return 0; }
637static inline void __exit ulite_of_unregister(void) { } 637static inline void __exit ulite_of_unregister(void) { }
638#endif /* CONFIG_OF */ 638#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
639 639
640/* --------------------------------------------------------------------- 640/* ---------------------------------------------------------------------
641 * Module setup/teardown 641 * Module setup/teardown
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index c2750391fd34..a3d8677af6a5 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -2,7 +2,7 @@
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs. 2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 * 3 *
4 * Copyright (C) 2007, 2008 Magnus Damm 4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009 Paul Mundt 5 * Copyright (C) 2009, 2010 Paul Mundt
6 * 6 *
7 * Based on intc2.c and ipr.c 7 * Based on intc2.c and ipr.c
8 * 8 *
@@ -26,6 +26,7 @@
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/topology.h> 27#include <linux/topology.h>
28#include <linux/bitmap.h> 28#include <linux/bitmap.h>
29#include <linux/cpumask.h>
29 30
30#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 31#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
31 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 32 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -234,6 +235,10 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
234 unsigned int cpu; 235 unsigned int cpu;
235 236
236 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { 237 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
238#ifdef CONFIG_SMP
239 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
240 continue;
241#endif
237 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); 242 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
238 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ 243 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
239 [_INTC_FN(handle)], irq); 244 [_INTC_FN(handle)], irq);
@@ -253,6 +258,10 @@ static void intc_disable(unsigned int irq)
253 unsigned int cpu; 258 unsigned int cpu;
254 259
255 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 260 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
261#ifdef CONFIG_SMP
262 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
263 continue;
264#endif
256 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); 265 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
257 intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ 266 intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
258 [_INTC_FN(handle)], irq); 267 [_INTC_FN(handle)], irq);
@@ -301,6 +310,23 @@ static int intc_set_wake(unsigned int irq, unsigned int on)
301 return 0; /* allow wakeup, but setup hardware in intc_suspend() */ 310 return 0; /* allow wakeup, but setup hardware in intc_suspend() */
302} 311}
303 312
313#ifdef CONFIG_SMP
314/*
315 * This is held with the irq desc lock held, so we don't require any
316 * additional locking here at the intc desc level. The affinity mask is
317 * later tested in the enable/disable paths.
318 */
319static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
320{
321 if (!cpumask_intersects(cpumask, cpu_online_mask))
322 return -1;
323
324 cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
325
326 return 0;
327}
328#endif
329
304static void intc_mask_ack(unsigned int irq) 330static void intc_mask_ack(unsigned int irq)
305{ 331{
306 struct intc_desc_int *d = get_intc_desc(irq); 332 struct intc_desc_int *d = get_intc_desc(irq);
@@ -847,6 +873,9 @@ void __init register_intc_controller(struct intc_desc *desc)
847 d->chip.shutdown = intc_disable; 873 d->chip.shutdown = intc_disable;
848 d->chip.set_type = intc_set_sense; 874 d->chip.set_type = intc_set_sense;
849 d->chip.set_wake = intc_set_wake; 875 d->chip.set_wake = intc_set_wake;
876#ifdef CONFIG_SMP
877 d->chip.set_affinity = intc_set_affinity;
878#endif
850 879
851 if (hw->ack_regs) { 880 if (hw->ack_regs) {
852 for (i = 0; i < hw->nr_ack_regs; i++) 881 for (i = 0; i < hw->nr_ack_regs; i++)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 715c518b1b68..4dd786b99b8b 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -578,6 +578,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
578 struct spi_master *spi_cntrl; 578 struct spi_master *spi_cntrl;
579 u32 l = 0, div = 0; 579 u32 l = 0, div = 0;
580 u8 word_len = spi->bits_per_word; 580 u8 word_len = spi->bits_per_word;
581 u32 speed_hz = spi->max_speed_hz;
581 582
582 mcspi = spi_master_get_devdata(spi->master); 583 mcspi = spi_master_get_devdata(spi->master);
583 spi_cntrl = mcspi->master; 584 spi_cntrl = mcspi->master;
@@ -587,9 +588,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
587 588
588 cs->word_len = word_len; 589 cs->word_len = word_len;
589 590
590 if (spi->max_speed_hz) { 591 if (t && t->speed_hz)
592 speed_hz = t->speed_hz;
593
594 if (speed_hz) {
591 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) 595 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
592 > spi->max_speed_hz) 596 > speed_hz)
593 div++; 597 div++;
594 } else 598 } else
595 div = 15; 599 div = 15;
@@ -751,11 +755,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
751 mcspi = spi_master_get_devdata(spi->master); 755 mcspi = spi_master_get_devdata(spi->master);
752 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 756 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
753 757
754 /* Unlink controller state from context save list */ 758 if (spi->controller_state) {
755 cs = spi->controller_state; 759 /* Unlink controller state from context save list */
756 list_del(&cs->node); 760 cs = spi->controller_state;
761 list_del(&cs->node);
757 762
758 kfree(spi->controller_state); 763 kfree(spi->controller_state);
764 }
759 765
760 if (mcspi_dma->dma_rx_channel != -1) { 766 if (mcspi_dma->dma_rx_channel != -1) {
761 omap_free_dma(mcspi_dma->dma_rx_channel); 767 omap_free_dma(mcspi_dma->dma_rx_channel);
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index dd7ea4c075db..eb44b60e1eb5 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -394,6 +394,7 @@ MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
394 394
395static int __init samsung_init(void) 395static int __init samsung_init(void)
396{ 396{
397 struct backlight_properties props;
397 struct sabi_retval sretval; 398 struct sabi_retval sretval;
398 const char *testStr = "SECLINUX"; 399 const char *testStr = "SECLINUX";
399 void __iomem *memcheck; 400 void __iomem *memcheck;
@@ -486,12 +487,14 @@ static int __init samsung_init(void)
486 goto error_no_platform; 487 goto error_no_platform;
487 488
488 /* create a backlight device to talk to this one */ 489 /* create a backlight device to talk to this one */
490 memset(&props, 0, sizeof(struct backlight_properties));
491 props.max_brightness = MAX_BRIGHT;
489 backlight_device = backlight_device_register("samsung", &sdev->dev, 492 backlight_device = backlight_device_register("samsung", &sdev->dev,
490 NULL, &backlight_ops); 493 NULL, &backlight_ops,
494 &props);
491 if (IS_ERR(backlight_device)) 495 if (IS_ERR(backlight_device))
492 goto error_no_backlight; 496 goto error_no_backlight;
493 497
494 backlight_device->props.max_brightness = MAX_BRIGHT;
495 backlight_device->props.brightness = read_brightness(); 498 backlight_device->props.brightness = read_brightness();
496 backlight_device->props.power = FB_BLANK_UNBLANK; 499 backlight_device->props.power = FB_BLANK_UNBLANK;
497 backlight_update_status(backlight_device); 500 backlight_update_status(backlight_device);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 975d556b4787..be6331e2c276 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1441,7 +1441,7 @@ static int acm_resume(struct usb_interface *intf)
1441 wb = acm->delayed_wb; 1441 wb = acm->delayed_wb;
1442 acm->delayed_wb = NULL; 1442 acm->delayed_wb = NULL;
1443 spin_unlock_irq(&acm->write_lock); 1443 spin_unlock_irq(&acm->write_lock);
1444 acm_start_wb(acm, acm->delayed_wb); 1444 acm_start_wb(acm, wb);
1445 } else { 1445 } else {
1446 spin_unlock_irq(&acm->write_lock); 1446 spin_unlock_irq(&acm->write_lock);
1447 } 1447 }
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 18aafcb08fc8..189141ca4e05 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -52,7 +52,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
52#define WDM_READ 4 52#define WDM_READ 4
53#define WDM_INT_STALL 5 53#define WDM_INT_STALL 5
54#define WDM_POLL_RUNNING 6 54#define WDM_POLL_RUNNING 6
55 55#define WDM_RESPONDING 7
56#define WDM_SUSPENDING 8
56 57
57#define WDM_MAX 16 58#define WDM_MAX 16
58 59
@@ -87,9 +88,7 @@ struct wdm_device {
87 int count; 88 int count;
88 dma_addr_t shandle; 89 dma_addr_t shandle;
89 dma_addr_t ihandle; 90 dma_addr_t ihandle;
90 struct mutex wlock; 91 struct mutex lock;
91 struct mutex rlock;
92 struct mutex plock;
93 wait_queue_head_t wait; 92 wait_queue_head_t wait;
94 struct work_struct rxwork; 93 struct work_struct rxwork;
95 int werr; 94 int werr;
@@ -117,21 +116,22 @@ static void wdm_in_callback(struct urb *urb)
117 int status = urb->status; 116 int status = urb->status;
118 117
119 spin_lock(&desc->iuspin); 118 spin_lock(&desc->iuspin);
119 clear_bit(WDM_RESPONDING, &desc->flags);
120 120
121 if (status) { 121 if (status) {
122 switch (status) { 122 switch (status) {
123 case -ENOENT: 123 case -ENOENT:
124 dev_dbg(&desc->intf->dev, 124 dev_dbg(&desc->intf->dev,
125 "nonzero urb status received: -ENOENT"); 125 "nonzero urb status received: -ENOENT");
126 break; 126 goto skip_error;
127 case -ECONNRESET: 127 case -ECONNRESET:
128 dev_dbg(&desc->intf->dev, 128 dev_dbg(&desc->intf->dev,
129 "nonzero urb status received: -ECONNRESET"); 129 "nonzero urb status received: -ECONNRESET");
130 break; 130 goto skip_error;
131 case -ESHUTDOWN: 131 case -ESHUTDOWN:
132 dev_dbg(&desc->intf->dev, 132 dev_dbg(&desc->intf->dev,
133 "nonzero urb status received: -ESHUTDOWN"); 133 "nonzero urb status received: -ESHUTDOWN");
134 break; 134 goto skip_error;
135 case -EPIPE: 135 case -EPIPE:
136 dev_err(&desc->intf->dev, 136 dev_err(&desc->intf->dev,
137 "nonzero urb status received: -EPIPE\n"); 137 "nonzero urb status received: -EPIPE\n");
@@ -147,6 +147,7 @@ static void wdm_in_callback(struct urb *urb)
147 desc->reslength = urb->actual_length; 147 desc->reslength = urb->actual_length;
148 memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); 148 memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
149 desc->length += desc->reslength; 149 desc->length += desc->reslength;
150skip_error:
150 wake_up(&desc->wait); 151 wake_up(&desc->wait);
151 152
152 set_bit(WDM_READ, &desc->flags); 153 set_bit(WDM_READ, &desc->flags);
@@ -229,13 +230,16 @@ static void wdm_int_callback(struct urb *urb)
229 desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 230 desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
230 spin_lock(&desc->iuspin); 231 spin_lock(&desc->iuspin);
231 clear_bit(WDM_READ, &desc->flags); 232 clear_bit(WDM_READ, &desc->flags);
232 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { 233 set_bit(WDM_RESPONDING, &desc->flags);
234 if (!test_bit(WDM_DISCONNECTING, &desc->flags)
235 && !test_bit(WDM_SUSPENDING, &desc->flags)) {
233 rv = usb_submit_urb(desc->response, GFP_ATOMIC); 236 rv = usb_submit_urb(desc->response, GFP_ATOMIC);
234 dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", 237 dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
235 __func__, rv); 238 __func__, rv);
236 } 239 }
237 spin_unlock(&desc->iuspin); 240 spin_unlock(&desc->iuspin);
238 if (rv < 0) { 241 if (rv < 0) {
242 clear_bit(WDM_RESPONDING, &desc->flags);
239 if (rv == -EPERM) 243 if (rv == -EPERM)
240 return; 244 return;
241 if (rv == -ENOMEM) { 245 if (rv == -ENOMEM) {
@@ -305,14 +309,38 @@ static ssize_t wdm_write
305 if (we < 0) 309 if (we < 0)
306 return -EIO; 310 return -EIO;
307 311
308 r = mutex_lock_interruptible(&desc->wlock); /* concurrent writes */ 312 desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
313 if (!buf) {
314 rv = -ENOMEM;
315 goto outnl;
316 }
317
318 r = copy_from_user(buf, buffer, count);
319 if (r > 0) {
320 kfree(buf);
321 rv = -EFAULT;
322 goto outnl;
323 }
324
325 /* concurrent writes and disconnect */
326 r = mutex_lock_interruptible(&desc->lock);
309 rv = -ERESTARTSYS; 327 rv = -ERESTARTSYS;
310 if (r) 328 if (r) {
329 kfree(buf);
311 goto outnl; 330 goto outnl;
331 }
332
333 if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
334 kfree(buf);
335 rv = -ENODEV;
336 goto outnp;
337 }
312 338
313 r = usb_autopm_get_interface(desc->intf); 339 r = usb_autopm_get_interface(desc->intf);
314 if (r < 0) 340 if (r < 0) {
341 kfree(buf);
315 goto outnp; 342 goto outnp;
343 }
316 344
317 if (!file->f_flags && O_NONBLOCK) 345 if (!file->f_flags && O_NONBLOCK)
318 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, 346 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
@@ -320,24 +348,8 @@ static ssize_t wdm_write
320 else 348 else
321 if (test_bit(WDM_IN_USE, &desc->flags)) 349 if (test_bit(WDM_IN_USE, &desc->flags))
322 r = -EAGAIN; 350 r = -EAGAIN;
323 if (r < 0) 351 if (r < 0) {
324 goto out;
325
326 if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
327 rv = -ENODEV;
328 goto out;
329 }
330
331 desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
332 if (!buf) {
333 rv = -ENOMEM;
334 goto out;
335 }
336
337 r = copy_from_user(buf, buffer, count);
338 if (r > 0) {
339 kfree(buf); 352 kfree(buf);
340 rv = -EFAULT;
341 goto out; 353 goto out;
342 } 354 }
343 355
@@ -374,7 +386,7 @@ static ssize_t wdm_write
374out: 386out:
375 usb_autopm_put_interface(desc->intf); 387 usb_autopm_put_interface(desc->intf);
376outnp: 388outnp:
377 mutex_unlock(&desc->wlock); 389 mutex_unlock(&desc->lock);
378outnl: 390outnl:
379 return rv < 0 ? rv : count; 391 return rv < 0 ? rv : count;
380} 392}
@@ -387,7 +399,7 @@ static ssize_t wdm_read
387 struct wdm_device *desc = file->private_data; 399 struct wdm_device *desc = file->private_data;
388 400
389 401
390 rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ 402 rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
391 if (rv < 0) 403 if (rv < 0)
392 return -ERESTARTSYS; 404 return -ERESTARTSYS;
393 405
@@ -424,11 +436,8 @@ retry:
424 spin_lock_irq(&desc->iuspin); 436 spin_lock_irq(&desc->iuspin);
425 437
426 if (desc->rerr) { /* read completed, error happened */ 438 if (desc->rerr) { /* read completed, error happened */
427 int t = desc->rerr;
428 desc->rerr = 0; 439 desc->rerr = 0;
429 spin_unlock_irq(&desc->iuspin); 440 spin_unlock_irq(&desc->iuspin);
430 dev_err(&desc->intf->dev,
431 "reading had resulted in %d\n", t);
432 rv = -EIO; 441 rv = -EIO;
433 goto err; 442 goto err;
434 } 443 }
@@ -465,9 +474,7 @@ retry:
465 rv = cntr; 474 rv = cntr;
466 475
467err: 476err:
468 mutex_unlock(&desc->rlock); 477 mutex_unlock(&desc->lock);
469 if (rv < 0 && rv != -EAGAIN)
470 dev_err(&desc->intf->dev, "wdm_read: exit error\n");
471 return rv; 478 return rv;
472} 479}
473 480
@@ -533,7 +540,7 @@ static int wdm_open(struct inode *inode, struct file *file)
533 } 540 }
534 intf->needs_remote_wakeup = 1; 541 intf->needs_remote_wakeup = 1;
535 542
536 mutex_lock(&desc->plock); 543 mutex_lock(&desc->lock);
537 if (!desc->count++) { 544 if (!desc->count++) {
538 rv = usb_submit_urb(desc->validity, GFP_KERNEL); 545 rv = usb_submit_urb(desc->validity, GFP_KERNEL);
539 if (rv < 0) { 546 if (rv < 0) {
@@ -544,7 +551,7 @@ static int wdm_open(struct inode *inode, struct file *file)
544 } else { 551 } else {
545 rv = 0; 552 rv = 0;
546 } 553 }
547 mutex_unlock(&desc->plock); 554 mutex_unlock(&desc->lock);
548 usb_autopm_put_interface(desc->intf); 555 usb_autopm_put_interface(desc->intf);
549out: 556out:
550 mutex_unlock(&wdm_mutex); 557 mutex_unlock(&wdm_mutex);
@@ -556,9 +563,9 @@ static int wdm_release(struct inode *inode, struct file *file)
556 struct wdm_device *desc = file->private_data; 563 struct wdm_device *desc = file->private_data;
557 564
558 mutex_lock(&wdm_mutex); 565 mutex_lock(&wdm_mutex);
559 mutex_lock(&desc->plock); 566 mutex_lock(&desc->lock);
560 desc->count--; 567 desc->count--;
561 mutex_unlock(&desc->plock); 568 mutex_unlock(&desc->lock);
562 569
563 if (!desc->count) { 570 if (!desc->count) {
564 dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); 571 dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
@@ -655,9 +662,7 @@ next_desc:
655 desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); 662 desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
656 if (!desc) 663 if (!desc)
657 goto out; 664 goto out;
658 mutex_init(&desc->wlock); 665 mutex_init(&desc->lock);
659 mutex_init(&desc->rlock);
660 mutex_init(&desc->plock);
661 spin_lock_init(&desc->iuspin); 666 spin_lock_init(&desc->iuspin);
662 init_waitqueue_head(&desc->wait); 667 init_waitqueue_head(&desc->wait);
663 desc->wMaxCommand = maxcom; 668 desc->wMaxCommand = maxcom;
@@ -771,14 +776,17 @@ static void wdm_disconnect(struct usb_interface *intf)
771 /* to terminate pending flushes */ 776 /* to terminate pending flushes */
772 clear_bit(WDM_IN_USE, &desc->flags); 777 clear_bit(WDM_IN_USE, &desc->flags);
773 spin_unlock_irqrestore(&desc->iuspin, flags); 778 spin_unlock_irqrestore(&desc->iuspin, flags);
774 cancel_work_sync(&desc->rxwork); 779 mutex_lock(&desc->lock);
775 kill_urbs(desc); 780 kill_urbs(desc);
781 cancel_work_sync(&desc->rxwork);
782 mutex_unlock(&desc->lock);
776 wake_up_all(&desc->wait); 783 wake_up_all(&desc->wait);
777 if (!desc->count) 784 if (!desc->count)
778 cleanup(desc); 785 cleanup(desc);
779 mutex_unlock(&wdm_mutex); 786 mutex_unlock(&wdm_mutex);
780} 787}
781 788
789#ifdef CONFIG_PM
782static int wdm_suspend(struct usb_interface *intf, pm_message_t message) 790static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
783{ 791{
784 struct wdm_device *desc = usb_get_intfdata(intf); 792 struct wdm_device *desc = usb_get_intfdata(intf);
@@ -786,22 +794,30 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
786 794
787 dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); 795 dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
788 796
789 mutex_lock(&desc->plock); 797 /* if this is an autosuspend the caller does the locking */
790#ifdef CONFIG_PM 798 if (!(message.event & PM_EVENT_AUTO))
799 mutex_lock(&desc->lock);
800 spin_lock_irq(&desc->iuspin);
801
791 if ((message.event & PM_EVENT_AUTO) && 802 if ((message.event & PM_EVENT_AUTO) &&
792 test_bit(WDM_IN_USE, &desc->flags)) { 803 (test_bit(WDM_IN_USE, &desc->flags)
804 || test_bit(WDM_RESPONDING, &desc->flags))) {
805 spin_unlock_irq(&desc->iuspin);
793 rv = -EBUSY; 806 rv = -EBUSY;
794 } else { 807 } else {
795#endif 808
796 cancel_work_sync(&desc->rxwork); 809 set_bit(WDM_SUSPENDING, &desc->flags);
810 spin_unlock_irq(&desc->iuspin);
811 /* callback submits work - order is essential */
797 kill_urbs(desc); 812 kill_urbs(desc);
798#ifdef CONFIG_PM 813 cancel_work_sync(&desc->rxwork);
799 } 814 }
800#endif 815 if (!(message.event & PM_EVENT_AUTO))
801 mutex_unlock(&desc->plock); 816 mutex_unlock(&desc->lock);
802 817
803 return rv; 818 return rv;
804} 819}
820#endif
805 821
806static int recover_from_urb_loss(struct wdm_device *desc) 822static int recover_from_urb_loss(struct wdm_device *desc)
807{ 823{
@@ -815,23 +831,27 @@ static int recover_from_urb_loss(struct wdm_device *desc)
815 } 831 }
816 return rv; 832 return rv;
817} 833}
834
835#ifdef CONFIG_PM
818static int wdm_resume(struct usb_interface *intf) 836static int wdm_resume(struct usb_interface *intf)
819{ 837{
820 struct wdm_device *desc = usb_get_intfdata(intf); 838 struct wdm_device *desc = usb_get_intfdata(intf);
821 int rv; 839 int rv;
822 840
823 dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); 841 dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor);
824 mutex_lock(&desc->plock); 842
843 clear_bit(WDM_SUSPENDING, &desc->flags);
825 rv = recover_from_urb_loss(desc); 844 rv = recover_from_urb_loss(desc);
826 mutex_unlock(&desc->plock); 845
827 return rv; 846 return rv;
828} 847}
848#endif
829 849
830static int wdm_pre_reset(struct usb_interface *intf) 850static int wdm_pre_reset(struct usb_interface *intf)
831{ 851{
832 struct wdm_device *desc = usb_get_intfdata(intf); 852 struct wdm_device *desc = usb_get_intfdata(intf);
833 853
834 mutex_lock(&desc->plock); 854 mutex_lock(&desc->lock);
835 return 0; 855 return 0;
836} 856}
837 857
@@ -841,7 +861,7 @@ static int wdm_post_reset(struct usb_interface *intf)
841 int rv; 861 int rv;
842 862
843 rv = recover_from_urb_loss(desc); 863 rv = recover_from_urb_loss(desc);
844 mutex_unlock(&desc->plock); 864 mutex_unlock(&desc->lock);
845 return 0; 865 return 0;
846} 866}
847 867
@@ -849,9 +869,11 @@ static struct usb_driver wdm_driver = {
849 .name = "cdc_wdm", 869 .name = "cdc_wdm",
850 .probe = wdm_probe, 870 .probe = wdm_probe,
851 .disconnect = wdm_disconnect, 871 .disconnect = wdm_disconnect,
872#ifdef CONFIG_PM
852 .suspend = wdm_suspend, 873 .suspend = wdm_suspend,
853 .resume = wdm_resume, 874 .resume = wdm_resume,
854 .reset_resume = wdm_resume, 875 .reset_resume = wdm_resume,
876#endif
855 .pre_reset = wdm_pre_reset, 877 .pre_reset = wdm_pre_reset,
856 .post_reset = wdm_post_reset, 878 .post_reset = wdm_post_reset,
857 .id_table = wdm_ids, 879 .id_table = wdm_ids,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e909ff7b9094..3466fdc5bb11 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1207,6 +1207,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1207 free_async(as); 1207 free_async(as);
1208 return -ENOMEM; 1208 return -ENOMEM;
1209 } 1209 }
1210 /* Isochronous input data may end up being discontiguous
1211 * if some of the packets are short. Clear the buffer so
1212 * that the gaps don't leak kernel data to userspace.
1213 */
1214 if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
1215 memset(as->urb->transfer_buffer, 0,
1216 uurb->buffer_length);
1210 } 1217 }
1211 as->urb->dev = ps->dev; 1218 as->urb->dev = ps->dev;
1212 as->urb->pipe = (uurb->type << 30) | 1219 as->urb->pipe = (uurb->type << 30) |
@@ -1345,10 +1352,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
1345 void __user *addr = as->userurb; 1352 void __user *addr = as->userurb;
1346 unsigned int i; 1353 unsigned int i;
1347 1354
1348 if (as->userbuffer && urb->actual_length) 1355 if (as->userbuffer && urb->actual_length) {
1349 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1356 if (urb->number_of_packets > 0) /* Isochronous */
1350 urb->actual_length)) 1357 i = urb->transfer_buffer_length;
1358 else /* Non-Isoc */
1359 i = urb->actual_length;
1360 if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
1351 goto err_out; 1361 goto err_out;
1362 }
1352 if (put_user(as->status, &userurb->status)) 1363 if (put_user(as->status, &userurb->status))
1353 goto err_out; 1364 goto err_out;
1354 if (put_user(urb->actual_length, &userurb->actual_length)) 1365 if (put_user(urb->actual_length, &userurb->actual_length))
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 27080561a1c2..45a32dadb406 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -453,6 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
453 if (urb->interval > (1 << 15)) 453 if (urb->interval > (1 << 15))
454 return -EINVAL; 454 return -EINVAL;
455 max = 1 << 15; 455 max = 1 << 15;
456 break;
456 case USB_SPEED_WIRELESS: 457 case USB_SPEED_WIRELESS:
457 if (urb->interval > 16) 458 if (urb->interval > 16)
458 return -EINVAL; 459 return -EINVAL;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7460cd797f45..11a3e0fa4331 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -747,7 +747,7 @@ config USB_MASS_STORAGE
747 which may be used with composite framework. 747 which may be used with composite framework.
748 748
749 Say "y" to link the driver statically, or "m" to build 749 Say "y" to link the driver statically, or "m" to build
750 a dynamically linked module called "g_file_storage". If unsure, 750 a dynamically linked module called "g_mass_storage". If unsure,
751 consider File-backed Storage Gadget. 751 consider File-backed Storage Gadget.
752 752
753config USB_G_SERIAL 753config USB_G_SERIAL
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 65a5f94cbc04..3568de210f79 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -266,7 +266,7 @@ struct usb_ep * __init usb_ep_autoconfig (
266 } 266 }
267 267
268#ifdef CONFIG_BLACKFIN 268#ifdef CONFIG_BLACKFIN
269 } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) { 269 } else if (gadget_is_musbhdrc(gadget)) {
270 if ((USB_ENDPOINT_XFER_BULK == type) || 270 if ((USB_ENDPOINT_XFER_BULK == type) ||
271 (USB_ENDPOINT_XFER_ISOC == type)) { 271 (USB_ENDPOINT_XFER_ISOC == type)) {
272 if (USB_DIR_IN & desc->bEndpointAddress) 272 if (USB_DIR_IN & desc->bEndpointAddress)
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5a3cdd08f1d0..f4911c09022e 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2910,7 +2910,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2910} 2910}
2911 2911
2912 2912
2913static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 2913static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
2914{ 2914{
2915 struct fsg_dev *fsg = fsg_from_func(f); 2915 struct fsg_dev *fsg = fsg_from_func(f);
2916 struct usb_gadget *gadget = c->cdev->gadget; 2916 struct usb_gadget *gadget = c->cdev->gadget;
@@ -2954,7 +2954,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2954autoconf_fail: 2954autoconf_fail:
2955 ERROR(fsg, "unable to autoconfigure all endpoints\n"); 2955 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2956 rc = -ENOTSUPP; 2956 rc = -ENOTSUPP;
2957 fsg_unbind(c, f);
2958 return rc; 2957 return rc;
2959} 2958}
2960 2959
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 1edbc12fff18..e511fec9f26d 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -136,6 +136,12 @@
136#define gadget_is_r8a66597(g) 0 136#define gadget_is_r8a66597(g) 0
137#endif 137#endif
138 138
139#ifdef CONFIG_USB_S3C_HSOTG
140#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
141#else
142#define gadget_is_s3c_hsotg(g) 0
143#endif
144
139 145
140/** 146/**
141 * usb_gadget_controller_number - support bcdDevice id convention 147 * usb_gadget_controller_number - support bcdDevice id convention
@@ -192,6 +198,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
192 return 0x24; 198 return 0x24;
193 else if (gadget_is_r8a66597(gadget)) 199 else if (gadget_is_r8a66597(gadget))
194 return 0x25; 200 return 0x25;
201 else if (gadget_is_s3c_hsotg(gadget))
202 return 0x26;
195 return -ENOENT; 203 return -ENOENT;
196} 204}
197 205
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index e8edc640381e..1088d08c7ed8 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1768,7 +1768,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1768 * usb_gadget_driver_{register,unregister}() must change. 1768 * usb_gadget_driver_{register,unregister}() must change.
1769 */ 1769 */
1770 if (the_controller) { 1770 if (the_controller) {
1771 WARNING(dev, "ignoring %s\n", pci_name(pdev)); 1771 pr_warning("ignoring %s\n", pci_name(pdev));
1772 return -EBUSY; 1772 return -EBUSY;
1773 } 1773 }
1774 if (!pdev->irq) { 1774 if (!pdev->irq) {
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 76496f5d272c..a930d7fd7e7a 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -211,8 +211,6 @@ static int __init cdc_do_config(struct usb_configuration *c)
211 ret = fsg_add(c->cdev, c, fsg_common); 211 ret = fsg_add(c->cdev, c, fsg_common);
212 if (ret < 0) 212 if (ret < 0)
213 return ret; 213 return ret;
214 if (ret < 0)
215 return ret;
216 214
217 return 0; 215 return 0;
218} 216}
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 8b45145b9136..5e13d23b5f0c 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/err.h>
26#include <linux/io.h> 27#include <linux/io.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/clk.h> 29#include <linux/clk.h>
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 4e0c67f1f51b..b6315aa47f7a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,7 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
12ifeq ($(CONFIG_FHCI_DEBUG),y) 12ifeq ($(CONFIG_FHCI_DEBUG),y)
13fhci-objs += fhci-dbg.o 13fhci-objs += fhci-dbg.o
14endif 14endif
15xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o 15xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
16 16
17obj-$(CONFIG_USB_WHCI_HCD) += whci/ 17obj-$(CONFIG_USB_WHCI_HCD) += whci/
18 18
@@ -25,7 +25,7 @@ obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
25obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o 25obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
26obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o 26obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
27obj-$(CONFIG_USB_FHCI_HCD) += fhci.o 27obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
28obj-$(CONFIG_USB_XHCI_HCD) += xhci.o 28obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
29obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o 29obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
30obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 30obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
31obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 31obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d8d6d3461d32..dc55a62859c6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -995,7 +995,7 @@ rescan:
995 /* endpoints can be iso streams. for now, we don't 995 /* endpoints can be iso streams. for now, we don't
996 * accelerate iso completions ... so spin a while. 996 * accelerate iso completions ... so spin a while.
997 */ 997 */
998 if (qh->hw->hw_info1 == 0) { 998 if (qh->hw == NULL) {
999 ehci_vdbg (ehci, "iso delay\n"); 999 ehci_vdbg (ehci, "iso delay\n");
1000 goto idle_timeout; 1000 goto idle_timeout;
1001 } 1001 }
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 39340ae00ac4..a0aaaaff2560 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1123,8 +1123,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
1123 urb->interval); 1123 urb->interval);
1124 } 1124 }
1125 1125
1126 /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ 1126 /* if dev->ep [epnum] is a QH, hw is set */
1127 } else if (unlikely (stream->hw_info1 != 0)) { 1127 } else if (unlikely (stream->hw != NULL)) {
1128 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", 1128 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
1129 urb->dev->devpath, epnum, 1129 urb->dev->devpath, epnum,
1130 usb_pipein(urb->pipe) ? "in" : "out"); 1130 usb_pipein(urb->pipe) ? "in" : "out");
@@ -1565,13 +1565,27 @@ itd_patch(
1565static inline void 1565static inline void
1566itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) 1566itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1567{ 1567{
1568 /* always prepend ITD/SITD ... only QH tree is order-sensitive */ 1568 union ehci_shadow *prev = &ehci->pshadow[frame];
1569 itd->itd_next = ehci->pshadow [frame]; 1569 __hc32 *hw_p = &ehci->periodic[frame];
1570 itd->hw_next = ehci->periodic [frame]; 1570 union ehci_shadow here = *prev;
1571 ehci->pshadow [frame].itd = itd; 1571 __hc32 type = 0;
1572
1573 /* skip any iso nodes which might belong to previous microframes */
1574 while (here.ptr) {
1575 type = Q_NEXT_TYPE(ehci, *hw_p);
1576 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1577 break;
1578 prev = periodic_next_shadow(ehci, prev, type);
1579 hw_p = shadow_next_periodic(ehci, &here, type);
1580 here = *prev;
1581 }
1582
1583 itd->itd_next = here;
1584 itd->hw_next = *hw_p;
1585 prev->itd = itd;
1572 itd->frame = frame; 1586 itd->frame = frame;
1573 wmb (); 1587 wmb ();
1574 ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); 1588 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1575} 1589}
1576 1590
1577/* fit urb's itds into the selected schedule slot; activate as needed */ 1591/* fit urb's itds into the selected schedule slot; activate as needed */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2d85e21ff282..b1dce96dd621 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -394,9 +394,8 @@ struct ehci_iso_sched {
394 * acts like a qh would, if EHCI had them for ISO. 394 * acts like a qh would, if EHCI had them for ISO.
395 */ 395 */
396struct ehci_iso_stream { 396struct ehci_iso_stream {
397 /* first two fields match QH, but info1 == 0 */ 397 /* first field matches ehci_hq, but is NULL */
398 __hc32 hw_next; 398 struct ehci_qh_hw *hw;
399 __hc32 hw_info1;
400 399
401 u32 refcount; 400 u32 refcount;
402 u8 bEndpointAddress; 401 u8 bEndpointAddress;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bee558aed427..f71a73a93d0c 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
418 418
419/* this function must be called with interrupt disabled */ 419/* this function must be called with interrupt disabled */
420static void free_usb_address(struct r8a66597 *r8a66597, 420static void free_usb_address(struct r8a66597 *r8a66597,
421 struct r8a66597_device *dev) 421 struct r8a66597_device *dev, int reset)
422{ 422{
423 int port; 423 int port;
424 424
@@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
430 dev->state = USB_STATE_DEFAULT; 430 dev->state = USB_STATE_DEFAULT;
431 r8a66597->address_map &= ~(1 << dev->address); 431 r8a66597->address_map &= ~(1 << dev->address);
432 dev->address = 0; 432 dev->address = 0;
433 dev_set_drvdata(&dev->udev->dev, NULL); 433 /*
434 * Only when resetting USB, it is necessary to erase drvdata. When
435 * a usb device with usb hub is disconnect, "dev->udev" is already
436 * freed on usb_desconnect(). So we cannot access the data.
437 */
438 if (reset)
439 dev_set_drvdata(&dev->udev->dev, NULL);
434 list_del(&dev->device_list); 440 list_del(&dev->device_list);
435 kfree(dev); 441 kfree(dev);
436 442
@@ -1069,7 +1075,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
1069 struct r8a66597_device *dev = r8a66597->root_hub[port].dev; 1075 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
1070 1076
1071 disable_r8a66597_pipe_all(r8a66597, dev); 1077 disable_r8a66597_pipe_all(r8a66597, dev);
1072 free_usb_address(r8a66597, dev); 1078 free_usb_address(r8a66597, dev, 0);
1073 1079
1074 start_root_hub_sampling(r8a66597, port, 0); 1080 start_root_hub_sampling(r8a66597, port, 0);
1075} 1081}
@@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2085 spin_lock_irqsave(&r8a66597->lock, flags); 2091 spin_lock_irqsave(&r8a66597->lock, flags);
2086 dev = get_r8a66597_device(r8a66597, addr); 2092 dev = get_r8a66597_device(r8a66597, addr);
2087 disable_r8a66597_pipe_all(r8a66597, dev); 2093 disable_r8a66597_pipe_all(r8a66597, dev);
2088 free_usb_address(r8a66597, dev); 2094 free_usb_address(r8a66597, dev, 0);
2089 put_child_connect_map(r8a66597, addr); 2095 put_child_connect_map(r8a66597, addr);
2090 spin_unlock_irqrestore(&r8a66597->lock, flags); 2096 spin_unlock_irqrestore(&r8a66597->lock, flags);
2091 } 2097 }
@@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2228 rh->port |= (1 << USB_PORT_FEAT_RESET); 2234 rh->port |= (1 << USB_PORT_FEAT_RESET);
2229 2235
2230 disable_r8a66597_pipe_all(r8a66597, dev); 2236 disable_r8a66597_pipe_all(r8a66597, dev);
2231 free_usb_address(r8a66597, dev); 2237 free_usb_address(r8a66597, dev, 1);
2232 2238
2233 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT, 2239 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2234 get_dvstctr_reg(port)); 2240 get_dvstctr_reg(port));
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 49f7d72f8b1b..bba9b19ed1b9 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -566,8 +566,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
566 if (interval < 3) 566 if (interval < 3)
567 interval = 3; 567 interval = 3;
568 if ((1 << interval) != 8*ep->desc.bInterval) 568 if ((1 << interval) != 8*ep->desc.bInterval)
569 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", 569 dev_warn(&udev->dev,
570 ep->desc.bEndpointAddress, 1 << interval); 570 "ep %#x - rounding interval"
571 " to %d microframes, "
572 "ep desc says %d microframes\n",
573 ep->desc.bEndpointAddress,
574 1 << interval,
575 8*ep->desc.bInterval);
571 } 576 }
572 break; 577 break;
573 default: 578 default:
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci.c
index 4cb69e0af834..492a61c2c79d 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci.c
@@ -1173,6 +1173,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1173 cmd_completion = &virt_dev->cmd_completion; 1173 cmd_completion = &virt_dev->cmd_completion;
1174 cmd_status = &virt_dev->cmd_status; 1174 cmd_status = &virt_dev->cmd_status;
1175 } 1175 }
1176 init_completion(cmd_completion);
1176 1177
1177 if (!ctx_change) 1178 if (!ctx_change)
1178 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 1179 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 4d2952f1fb13..3adab041355a 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -202,6 +202,7 @@ static void appledisplay_work(struct work_struct *work)
202static int appledisplay_probe(struct usb_interface *iface, 202static int appledisplay_probe(struct usb_interface *iface,
203 const struct usb_device_id *id) 203 const struct usb_device_id *id)
204{ 204{
205 struct backlight_properties props;
205 struct appledisplay *pdata; 206 struct appledisplay *pdata;
206 struct usb_device *udev = interface_to_usbdev(iface); 207 struct usb_device *udev = interface_to_usbdev(iface);
207 struct usb_host_interface *iface_desc; 208 struct usb_host_interface *iface_desc;
@@ -279,16 +280,16 @@ static int appledisplay_probe(struct usb_interface *iface,
279 /* Register backlight device */ 280 /* Register backlight device */
280 snprintf(bl_name, sizeof(bl_name), "appledisplay%d", 281 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
281 atomic_inc_return(&count_displays) - 1); 282 atomic_inc_return(&count_displays) - 1);
283 memset(&props, 0, sizeof(struct backlight_properties));
284 props.max_brightness = 0xff;
282 pdata->bd = backlight_device_register(bl_name, NULL, pdata, 285 pdata->bd = backlight_device_register(bl_name, NULL, pdata,
283 &appledisplay_bl_data); 286 &appledisplay_bl_data, &props);
284 if (IS_ERR(pdata->bd)) { 287 if (IS_ERR(pdata->bd)) {
285 dev_err(&iface->dev, "Backlight registration failed\n"); 288 dev_err(&iface->dev, "Backlight registration failed\n");
286 retval = PTR_ERR(pdata->bd); 289 retval = PTR_ERR(pdata->bd);
287 goto error; 290 goto error;
288 } 291 }
289 292
290 pdata->bd->props.max_brightness = 0xff;
291
292 /* Try to get brightness */ 293 /* Try to get brightness */
293 brightness = appledisplay_bl_get_brightness(pdata->bd); 294 brightness = appledisplay_bl_get_brightness(pdata->bd);
294 295
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b4bbf8f2c238..0e8b8ab1d168 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -379,7 +379,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
379 u8 devctl, u8 power) 379 u8 devctl, u8 power)
380{ 380{
381 irqreturn_t handled = IRQ_NONE; 381 irqreturn_t handled = IRQ_NONE;
382 void __iomem *mbase = musb->mregs;
383 382
384 DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, 383 DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
385 int_usb); 384 int_usb);
@@ -394,6 +393,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
394 393
395 if (devctl & MUSB_DEVCTL_HM) { 394 if (devctl & MUSB_DEVCTL_HM) {
396#ifdef CONFIG_USB_MUSB_HDRC_HCD 395#ifdef CONFIG_USB_MUSB_HDRC_HCD
396 void __iomem *mbase = musb->mregs;
397
397 switch (musb->xceiv->state) { 398 switch (musb->xceiv->state) {
398 case OTG_STATE_A_SUSPEND: 399 case OTG_STATE_A_SUSPEND:
399 /* remote wakeup? later, GetPortStatus 400 /* remote wakeup? later, GetPortStatus
@@ -471,6 +472,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
471#ifdef CONFIG_USB_MUSB_HDRC_HCD 472#ifdef CONFIG_USB_MUSB_HDRC_HCD
472 /* see manual for the order of the tests */ 473 /* see manual for the order of the tests */
473 if (int_usb & MUSB_INTR_SESSREQ) { 474 if (int_usb & MUSB_INTR_SESSREQ) {
475 void __iomem *mbase = musb->mregs;
476
474 DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); 477 DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
475 478
476 /* IRQ arrives from ID pin sense or (later, if VBUS power 479 /* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -519,6 +522,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
519 case OTG_STATE_A_WAIT_BCON: 522 case OTG_STATE_A_WAIT_BCON:
520 case OTG_STATE_A_WAIT_VRISE: 523 case OTG_STATE_A_WAIT_VRISE:
521 if (musb->vbuserr_retry) { 524 if (musb->vbuserr_retry) {
525 void __iomem *mbase = musb->mregs;
526
522 musb->vbuserr_retry--; 527 musb->vbuserr_retry--;
523 ignore = 1; 528 ignore = 1;
524 devctl |= MUSB_DEVCTL_SESSION; 529 devctl |= MUSB_DEVCTL_SESSION;
@@ -622,6 +627,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
622 627
623 if (int_usb & MUSB_INTR_CONNECT) { 628 if (int_usb & MUSB_INTR_CONNECT) {
624 struct usb_hcd *hcd = musb_to_hcd(musb); 629 struct usb_hcd *hcd = musb_to_hcd(musb);
630 void __iomem *mbase = musb->mregs;
625 631
626 handled = IRQ_HANDLED; 632 handled = IRQ_HANDLED;
627 musb->is_active = 1; 633 musb->is_active = 1;
@@ -2007,7 +2013,6 @@ bad_config:
2007 /* host side needs more setup */ 2013 /* host side needs more setup */
2008 if (is_host_enabled(musb)) { 2014 if (is_host_enabled(musb)) {
2009 struct usb_hcd *hcd = musb_to_hcd(musb); 2015 struct usb_hcd *hcd = musb_to_hcd(musb);
2010 u8 busctl;
2011 2016
2012 otg_set_host(musb->xceiv, &hcd->self); 2017 otg_set_host(musb->xceiv, &hcd->self);
2013 2018
@@ -2018,9 +2023,9 @@ bad_config:
2018 2023
2019 /* program PHY to use external vBus if required */ 2024 /* program PHY to use external vBus if required */
2020 if (plat->extvbus) { 2025 if (plat->extvbus) {
2021 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL); 2026 u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
2022 busctl |= MUSB_ULPI_USE_EXTVBUS; 2027 busctl |= MUSB_ULPI_USE_EXTVBUS;
2023 musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl); 2028 musb_write_ulpi_buscontrol(musb->mregs, busctl);
2024 } 2029 }
2025 } 2030 }
2026 2031
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d849fb81c131..cd9f4a9a06c6 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -469,7 +469,7 @@ struct musb_csr_regs {
469 469
470struct musb_context_registers { 470struct musb_context_registers {
471 471
472#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) 472#ifdef CONFIG_PM
473 u32 otg_sysconfig, otg_forcestandby; 473 u32 otg_sysconfig, otg_forcestandby;
474#endif 474#endif
475 u8 power; 475 u8 power;
@@ -483,7 +483,7 @@ struct musb_context_registers {
483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; 483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
484}; 484};
485 485
486#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) 486#ifdef CONFIG_PM
487extern void musb_platform_save_context(struct musb *musb, 487extern void musb_platform_save_context(struct musb *musb,
488 struct musb_context_registers *musb_context); 488 struct musb_context_registers *musb_context);
489extern void musb_platform_restore_context(struct musb *musb, 489extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3421cf9858b5..dec896e888db 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1689,7 +1689,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1689 dma->desired_mode = 1; 1689 dma->desired_mode = 1;
1690 if (rx_count < hw_ep->max_packet_sz_rx) { 1690 if (rx_count < hw_ep->max_packet_sz_rx) {
1691 length = rx_count; 1691 length = rx_count;
1692 dma->bDesiredMode = 0; 1692 dma->desired_mode = 0;
1693 } else { 1693 } else {
1694 length = urb->transfer_buffer_length; 1694 length = urb->transfer_buffer_length;
1695 } 1695 }
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 8d8062b10e2f..fa55aacc385d 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -326,6 +326,11 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
326 musb_writew(mbase, MUSB_RXFIFOADD, c_off); 326 musb_writew(mbase, MUSB_RXFIFOADD, c_off);
327} 327}
328 328
329static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
330{
331 musb_writeb(mbase, MUSB_ULPI_BUSCONTROL, val);
332}
333
329static inline u8 musb_read_txfifosz(void __iomem *mbase) 334static inline u8 musb_read_txfifosz(void __iomem *mbase)
330{ 335{
331 return musb_readb(mbase, MUSB_TXFIFOSZ); 336 return musb_readb(mbase, MUSB_TXFIFOSZ);
@@ -346,6 +351,11 @@ static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
346 return musb_readw(mbase, MUSB_RXFIFOADD); 351 return musb_readw(mbase, MUSB_RXFIFOADD);
347} 352}
348 353
354static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
355{
356 return musb_readb(mbase, MUSB_ULPI_BUSCONTROL);
357}
358
349static inline u8 musb_read_configdata(void __iomem *mbase) 359static inline u8 musb_read_configdata(void __iomem *mbase)
350{ 360{
351 musb_writeb(mbase, MUSB_INDEX, 0); 361 musb_writeb(mbase, MUSB_INDEX, 0);
@@ -510,20 +520,33 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
510{ 520{
511} 521}
512 522
523static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
524{
525}
526
513static inline u8 musb_read_txfifosz(void __iomem *mbase) 527static inline u8 musb_read_txfifosz(void __iomem *mbase)
514{ 528{
529 return 0;
515} 530}
516 531
517static inline u16 musb_read_txfifoadd(void __iomem *mbase) 532static inline u16 musb_read_txfifoadd(void __iomem *mbase)
518{ 533{
534 return 0;
519} 535}
520 536
521static inline u8 musb_read_rxfifosz(void __iomem *mbase) 537static inline u8 musb_read_rxfifosz(void __iomem *mbase)
522{ 538{
539 return 0;
523} 540}
524 541
525static inline u16 musb_read_rxfifoadd(void __iomem *mbase) 542static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
526{ 543{
544 return 0;
545}
546
547static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
548{
549 return 0;
527} 550}
528 551
529static inline u8 musb_read_configdata(void __iomem *mbase) 552static inline u8 musb_read_configdata(void __iomem *mbase)
@@ -577,22 +600,27 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
577 600
578static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) 601static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
579{ 602{
603 return 0;
580} 604}
581 605
582static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) 606static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
583{ 607{
608 return 0;
584} 609}
585 610
586static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) 611static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
587{ 612{
613 return 0;
588} 614}
589 615
590static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) 616static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
591{ 617{
618 return 0;
592} 619}
593 620
594static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) 621static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
595{ 622{
623 return 0;
596} 624}
597 625
598static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) 626static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c78b255e3f83..a0ecb42cb33a 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -474,14 +474,14 @@ config USB_SERIAL_OTI6858
474 474
475config USB_SERIAL_QCAUX 475config USB_SERIAL_QCAUX
476 tristate "USB Qualcomm Auxiliary Serial Port Driver" 476 tristate "USB Qualcomm Auxiliary Serial Port Driver"
477 ---help--- 477 help
478 Say Y here if you want to use the auxiliary serial ports provided 478 Say Y here if you want to use the auxiliary serial ports provided
479 by many modems based on Qualcomm chipsets. These ports often use 479 by many modems based on Qualcomm chipsets. These ports often use
480 a proprietary protocol called DM and cannot be used for AT- or 480 a proprietary protocol called DM and cannot be used for AT- or
481 PPP-based communication. 481 PPP-based communication.
482 482
483 To compile this driver as a module, choose M here: the 483 To compile this driver as a module, choose M here: the
484 module will be called moto_modem. If unsure, choose N. 484 module will be called qcaux. If unsure, choose N.
485 485
486config USB_SERIAL_QUALCOMM 486config USB_SERIAL_QUALCOMM
487 tristate "USB Qualcomm Serial modem" 487 tristate "USB Qualcomm Serial modem"
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index b22ac3258523..f347da2ef00a 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -181,6 +181,7 @@ static int usb_console_setup(struct console *co, char *options)
181 /* The console is special in terms of closing the device so 181 /* The console is special in terms of closing the device so
182 * indicate this port is now acting as a system console. */ 182 * indicate this port is now acting as a system console. */
183 port->console = 1; 183 port->console = 1;
184 port->port.console = 1;
184 185
185 mutex_unlock(&serial->disc_mutex); 186 mutex_unlock(&serial->disc_mutex);
186 return retval; 187 return retval;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 507382b0a9ed..ec9b0449ccf6 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -313,11 +313,6 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
313 return -EPROTO; 313 return -EPROTO;
314 } 314 }
315 315
316 /* Single data value */
317 result = usb_control_msg(serial->dev,
318 usb_sndctrlpipe(serial->dev, 0),
319 request, REQTYPE_HOST_TO_DEVICE, data[0],
320 0, NULL, 0, 300);
321 return 0; 316 return 0;
322} 317}
323 318
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6af0dfa5f5ac..1d7c4fac02e8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -91,7 +91,7 @@ struct ftdi_private {
91 unsigned long tx_outstanding_bytes; 91 unsigned long tx_outstanding_bytes;
92 unsigned long tx_outstanding_urbs; 92 unsigned long tx_outstanding_urbs;
93 unsigned short max_packet_size; 93 unsigned short max_packet_size;
94 struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */ 94 struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
95}; 95};
96 96
97/* struct ftdi_sio_quirk is used by devices requiring special attention. */ 97/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = {
658 { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, 658 { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
659 { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, 659 { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
660 { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, 660 { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
661 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
661 { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, 662 { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
662 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, 663 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
663 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, 664 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
@@ -1272,8 +1273,8 @@ check_and_exit:
1272 (priv->flags & ASYNC_SPD_MASK)) || 1273 (priv->flags & ASYNC_SPD_MASK)) ||
1273 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && 1274 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
1274 (old_priv.custom_divisor != priv->custom_divisor))) { 1275 (old_priv.custom_divisor != priv->custom_divisor))) {
1275 mutex_unlock(&priv->cfg_lock);
1276 change_speed(tty, port); 1276 change_speed(tty, port);
1277 mutex_unlock(&priv->cfg_lock);
1277 } 1278 }
1278 else 1279 else
1279 mutex_unlock(&priv->cfg_lock); 1280 mutex_unlock(&priv->cfg_lock);
@@ -2264,9 +2265,11 @@ static void ftdi_set_termios(struct tty_struct *tty,
2264 clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); 2265 clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
2265 } else { 2266 } else {
2266 /* set the baudrate determined before */ 2267 /* set the baudrate determined before */
2268 mutex_lock(&priv->cfg_lock);
2267 if (change_speed(tty, port)) 2269 if (change_speed(tty, port))
2268 dev_err(&port->dev, "%s urb failed to set baudrate\n", 2270 dev_err(&port->dev, "%s urb failed to set baudrate\n",
2269 __func__); 2271 __func__);
2272 mutex_unlock(&priv->cfg_lock);
2270 /* Ensure RTS and DTR are raised when baudrate changed from 0 */ 2273 /* Ensure RTS and DTR are raised when baudrate changed from 0 */
2271 if (!old_termios || (old_termios->c_cflag & CBAUD) == B0) 2274 if (!old_termios || (old_termios->c_cflag & CBAUD) == B0)
2272 set_mctrl(port, TIOCM_DTR | TIOCM_RTS); 2275 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 0727e198503e..75482cbc3998 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -501,6 +501,13 @@
501#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ 501#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
502 502
503/* 503/*
504 * Contec products (http://www.contec.com)
505 * Submitted by Daniel Sangorrin
506 */
507#define CONTEC_VID 0x06CE /* Vendor ID */
508#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
509
510/*
504 * Definitions for B&B Electronics products. 511 * Definitions for B&B Electronics products.
505 */ 512 */
506#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ 513#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 89fac36684c5..f804acb138ec 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -130,7 +130,7 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
130 spin_unlock_irqrestore(&port->lock, flags); 130 spin_unlock_irqrestore(&port->lock, flags);
131 131
132 /* if we have a bulk endpoint, start reading from it */ 132 /* if we have a bulk endpoint, start reading from it */
133 if (serial->num_bulk_in) { 133 if (port->bulk_in_size) {
134 /* Start reading from the device */ 134 /* Start reading from the device */
135 usb_fill_bulk_urb(port->read_urb, serial->dev, 135 usb_fill_bulk_urb(port->read_urb, serial->dev,
136 usb_rcvbulkpipe(serial->dev, 136 usb_rcvbulkpipe(serial->dev,
@@ -159,10 +159,10 @@ static void generic_cleanup(struct usb_serial_port *port)
159 dbg("%s - port %d", __func__, port->number); 159 dbg("%s - port %d", __func__, port->number);
160 160
161 if (serial->dev) { 161 if (serial->dev) {
162 /* shutdown any bulk reads that might be going on */ 162 /* shutdown any bulk transfers that might be going on */
163 if (serial->num_bulk_out) 163 if (port->bulk_out_size)
164 usb_kill_urb(port->write_urb); 164 usb_kill_urb(port->write_urb);
165 if (serial->num_bulk_in) 165 if (port->bulk_in_size)
166 usb_kill_urb(port->read_urb); 166 usb_kill_urb(port->read_urb);
167 } 167 }
168} 168}
@@ -333,15 +333,15 @@ int usb_serial_generic_write(struct tty_struct *tty,
333 333
334 dbg("%s - port %d", __func__, port->number); 334 dbg("%s - port %d", __func__, port->number);
335 335
336 /* only do something if we have a bulk out endpoint */
337 if (!port->bulk_out_size)
338 return -ENODEV;
339
336 if (count == 0) { 340 if (count == 0) {
337 dbg("%s - write request of 0 bytes", __func__); 341 dbg("%s - write request of 0 bytes", __func__);
338 return 0; 342 return 0;
339 } 343 }
340 344
341 /* only do something if we have a bulk out endpoint */
342 if (!serial->num_bulk_out)
343 return 0;
344
345 if (serial->type->max_in_flight_urbs) 345 if (serial->type->max_in_flight_urbs)
346 return usb_serial_multi_urb_write(tty, port, 346 return usb_serial_multi_urb_write(tty, port,
347 buf, count); 347 buf, count);
@@ -364,14 +364,19 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
364 int room = 0; 364 int room = 0;
365 365
366 dbg("%s - port %d", __func__, port->number); 366 dbg("%s - port %d", __func__, port->number);
367
368 if (!port->bulk_out_size)
369 return 0;
370
367 spin_lock_irqsave(&port->lock, flags); 371 spin_lock_irqsave(&port->lock, flags);
368 if (serial->type->max_in_flight_urbs) { 372 if (serial->type->max_in_flight_urbs) {
369 if (port->urbs_in_flight < serial->type->max_in_flight_urbs) 373 if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
370 room = port->bulk_out_size * 374 room = port->bulk_out_size *
371 (serial->type->max_in_flight_urbs - 375 (serial->type->max_in_flight_urbs -
372 port->urbs_in_flight); 376 port->urbs_in_flight);
373 } else if (serial->num_bulk_out) 377 } else {
374 room = kfifo_avail(&port->write_fifo); 378 room = kfifo_avail(&port->write_fifo);
379 }
375 spin_unlock_irqrestore(&port->lock, flags); 380 spin_unlock_irqrestore(&port->lock, flags);
376 381
377 dbg("%s - returns %d", __func__, room); 382 dbg("%s - returns %d", __func__, room);
@@ -382,15 +387,18 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
382{ 387{
383 struct usb_serial_port *port = tty->driver_data; 388 struct usb_serial_port *port = tty->driver_data;
384 struct usb_serial *serial = port->serial; 389 struct usb_serial *serial = port->serial;
385 int chars = 0;
386 unsigned long flags; 390 unsigned long flags;
391 int chars;
387 392
388 dbg("%s - port %d", __func__, port->number); 393 dbg("%s - port %d", __func__, port->number);
389 394
395 if (!port->bulk_out_size)
396 return 0;
397
390 spin_lock_irqsave(&port->lock, flags); 398 spin_lock_irqsave(&port->lock, flags);
391 if (serial->type->max_in_flight_urbs) 399 if (serial->type->max_in_flight_urbs)
392 chars = port->tx_bytes_flight; 400 chars = port->tx_bytes_flight;
393 else if (serial->num_bulk_out) 401 else
394 chars = kfifo_len(&port->write_fifo); 402 chars = kfifo_len(&port->write_fifo);
395 spin_unlock_irqrestore(&port->lock, flags); 403 spin_unlock_irqrestore(&port->lock, flags);
396 404
@@ -415,11 +423,13 @@ void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
415 ((serial->type->read_bulk_callback) ? 423 ((serial->type->read_bulk_callback) ?
416 serial->type->read_bulk_callback : 424 serial->type->read_bulk_callback :
417 usb_serial_generic_read_bulk_callback), port); 425 usb_serial_generic_read_bulk_callback), port);
426
418 result = usb_submit_urb(urb, mem_flags); 427 result = usb_submit_urb(urb, mem_flags);
419 if (result) 428 if (result && result != -EPERM) {
420 dev_err(&port->dev, 429 dev_err(&port->dev,
421 "%s - failed resubmitting read urb, error %d\n", 430 "%s - failed resubmitting read urb, error %d\n",
422 __func__, result); 431 __func__, result);
432 }
423} 433}
424EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb); 434EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
425 435
@@ -498,23 +508,18 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
498 if (port->urbs_in_flight < 0) 508 if (port->urbs_in_flight < 0)
499 port->urbs_in_flight = 0; 509 port->urbs_in_flight = 0;
500 spin_unlock_irqrestore(&port->lock, flags); 510 spin_unlock_irqrestore(&port->lock, flags);
501
502 if (status) {
503 dbg("%s - nonzero multi-urb write bulk status "
504 "received: %d", __func__, status);
505 return;
506 }
507 } else { 511 } else {
508 port->write_urb_busy = 0; 512 port->write_urb_busy = 0;
509 513
510 if (status) { 514 if (status)
511 dbg("%s - nonzero multi-urb write bulk status "
512 "received: %d", __func__, status);
513 kfifo_reset_out(&port->write_fifo); 515 kfifo_reset_out(&port->write_fifo);
514 } else 516 else
515 usb_serial_generic_write_start(port); 517 usb_serial_generic_write_start(port);
516 } 518 }
517 519
520 if (status)
521 dbg("%s - non-zero urb status: %d", __func__, status);
522
518 usb_serial_port_softint(port); 523 usb_serial_port_softint(port);
519} 524}
520EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback); 525EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 847b805d63a3..950cb311ca94 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -288,7 +288,9 @@ static int option_resume(struct usb_serial *serial);
288 288
289#define QUALCOMM_VENDOR_ID 0x05C6 289#define QUALCOMM_VENDOR_ID 0x05C6
290 290
291#define MAXON_VENDOR_ID 0x16d8 291#define CMOTECH_VENDOR_ID 0x16d8
292#define CMOTECH_PRODUCT_6008 0x6008
293#define CMOTECH_PRODUCT_6280 0x6280
292 294
293#define TELIT_VENDOR_ID 0x1bc7 295#define TELIT_VENDOR_ID 0x1bc7
294#define TELIT_PRODUCT_UC864E 0x1003 296#define TELIT_PRODUCT_UC864E 0x1003
@@ -309,6 +311,7 @@ static int option_resume(struct usb_serial *serial);
309#define DLINK_VENDOR_ID 0x1186 311#define DLINK_VENDOR_ID 0x1186
310#define DLINK_PRODUCT_DWM_652 0x3e04 312#define DLINK_PRODUCT_DWM_652 0x3e04
311#define DLINK_PRODUCT_DWM_652_U5 0xce16 313#define DLINK_PRODUCT_DWM_652_U5 0xce16
314#define DLINK_PRODUCT_DWM_652_U5A 0xce1e
312 315
313#define QISDA_VENDOR_ID 0x1da5 316#define QISDA_VENDOR_ID 0x1da5
314#define QISDA_PRODUCT_H21_4512 0x4512 317#define QISDA_PRODUCT_H21_4512 0x4512
@@ -332,6 +335,24 @@ static int option_resume(struct usb_serial *serial);
332#define ALCATEL_VENDOR_ID 0x1bbb 335#define ALCATEL_VENDOR_ID 0x1bbb
333#define ALCATEL_PRODUCT_X060S 0x0000 336#define ALCATEL_PRODUCT_X060S 0x0000
334 337
338#define PIRELLI_VENDOR_ID 0x1266
339#define PIRELLI_PRODUCT_C100_1 0x1002
340#define PIRELLI_PRODUCT_C100_2 0x1003
341#define PIRELLI_PRODUCT_1004 0x1004
342#define PIRELLI_PRODUCT_1005 0x1005
343#define PIRELLI_PRODUCT_1006 0x1006
344#define PIRELLI_PRODUCT_1007 0x1007
345#define PIRELLI_PRODUCT_1008 0x1008
346#define PIRELLI_PRODUCT_1009 0x1009
347#define PIRELLI_PRODUCT_100A 0x100a
348#define PIRELLI_PRODUCT_100B 0x100b
349#define PIRELLI_PRODUCT_100C 0x100c
350#define PIRELLI_PRODUCT_100D 0x100d
351#define PIRELLI_PRODUCT_100E 0x100e
352#define PIRELLI_PRODUCT_100F 0x100f
353#define PIRELLI_PRODUCT_1011 0x1011
354#define PIRELLI_PRODUCT_1012 0x1012
355
335/* Airplus products */ 356/* Airplus products */
336#define AIRPLUS_VENDOR_ID 0x1011 357#define AIRPLUS_VENDOR_ID 0x1011
337#define AIRPLUS_PRODUCT_MCD650 0x3198 358#define AIRPLUS_PRODUCT_MCD650 0x3198
@@ -547,7 +568,8 @@ static const struct usb_device_id option_ids[] = {
547 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 568 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
548 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 569 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
549 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 570 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
550 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ 571 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
572 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
551 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 573 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
552 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, 574 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
553 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 575 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -659,6 +681,7 @@ static const struct usb_device_id option_ids[] = {
659 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 681 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
660 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 682 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
661 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 683 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
684 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) },
662 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, 685 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
663 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, 686 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
664 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, 687 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
@@ -666,7 +689,6 @@ static const struct usb_device_id option_ids[] = {
666 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, 689 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
667 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ 690 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
668 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 691 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
669 { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
670 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 692 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
671 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 693 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
672 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 694 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
@@ -675,6 +697,24 @@ static const struct usb_device_id option_ids[] = {
675 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 697 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
676 }, 698 },
677 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 699 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
700 /* Pirelli */
701 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
702 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
703 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)},
704 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)},
705 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)},
706 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)},
707 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)},
708 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)},
709 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)},
710 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) },
711 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) },
712 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) },
713 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) },
714 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
715 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
716 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
717
678 { } /* Terminating entry */ 718 { } /* Terminating entry */
679}; 719};
680MODULE_DEVICE_TABLE(usb, option_ids); 720MODULE_DEVICE_TABLE(usb, option_ids);
@@ -798,12 +838,19 @@ static int option_probe(struct usb_serial *serial,
798 const struct usb_device_id *id) 838 const struct usb_device_id *id)
799{ 839{
800 struct option_intf_private *data; 840 struct option_intf_private *data;
841
801 /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ 842 /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
802 if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && 843 if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
803 serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && 844 serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
804 serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) 845 serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
805 return -ENODEV; 846 return -ENODEV;
806 847
848 /* Bandrich modem and AT command interface is 0xff */
849 if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
850 serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
851 serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
852 return -ENODEV;
853
807 data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL); 854 data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
808 if (!data) 855 if (!data)
809 return -ENOMEM; 856 return -ENOMEM;
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 310ff6ec6567..53a2d5a935a2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -47,6 +47,35 @@ static const struct usb_device_id id_table[] = {
47 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ 47 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
48 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ 48 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
49 {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ 49 {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
50 {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
51 {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
52 {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
53 {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
54 {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
55 {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
56 {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
57 {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
58 {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
59 {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
60 {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
61 {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
62 {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
63 {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
64 {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
65 {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
66 {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
67 {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
68 {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
69 {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
70 {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
71 {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
72 {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
73 {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
74 {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
75 {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
76 {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
77 {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
78 {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
50 { } /* Terminating entry */ 79 { } /* Terminating entry */
51}; 80};
52MODULE_DEVICE_TABLE(usb, id_table); 81MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 98b549b1cab2..ccf1dbbb87ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -374,6 +374,15 @@ UNUSUAL_DEV( 0x04ce, 0x0002, 0x0074, 0x0074,
374 US_SC_DEVICE, US_PR_DEVICE, NULL, 374 US_SC_DEVICE, US_PR_DEVICE, NULL,
375 US_FL_FIX_INQUIRY), 375 US_FL_FIX_INQUIRY),
376 376
377/* Reported by Ondrej Zary <linux@rainbow-software.org>
378 * The device reports one sector more and breaks when that sector is accessed
379 */
380UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
381 "ScanLogic",
382 "SL11R-IDE",
383 US_SC_DEVICE, US_PR_DEVICE, NULL,
384 US_FL_FIX_CAPACITY),
385
377/* Reported by Kriston Fincher <kriston@airmail.net> 386/* Reported by Kriston Fincher <kriston@airmail.net>
378 * Patch submitted by Sean Millichamp <sean@bruenor.org> 387 * Patch submitted by Sean Millichamp <sean@bruenor.org>
379 * This is to support the Panasonic PalmCam PV-SD4090 388 * This is to support the Panasonic PalmCam PV-SD4090
@@ -1380,20 +1389,6 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100,
1380 US_SC_DEVICE, US_PR_DEVICE, NULL, 1389 US_SC_DEVICE, US_PR_DEVICE, NULL,
1381 US_FL_IGNORE_RESIDUE ), 1390 US_FL_IGNORE_RESIDUE ),
1382 1391
1383/* Jeremy Katz <katzj@redhat.com>:
1384 * The Blackberry Pearl can run in two modes; a usb-storage only mode
1385 * and a mode that allows access via mass storage and to its database.
1386 * The berry_charge module will set the device to dual mode and thus we
1387 * should ignore its native mode if that module is built
1388 */
1389#ifdef CONFIG_USB_BERRY_CHARGE
1390UNUSUAL_DEV( 0x0fca, 0x0006, 0x0001, 0x0001,
1391 "RIM",
1392 "Blackberry Pearl",
1393 US_SC_DEVICE, US_PR_DEVICE, NULL,
1394 US_FL_IGNORE_DEVICE ),
1395#endif
1396
1397/* Reported by Michael Stattmann <michael@stattmann.com> */ 1392/* Reported by Michael Stattmann <michael@stattmann.com> */
1398UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, 1393UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1399 "Sony Ericsson", 1394 "Sony Ericsson",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index e7eeb63fab23..b409c228f254 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -891,7 +891,7 @@ static int hwarc_post_reset(struct usb_interface *iface)
891} 891}
892 892
893/** USB device ID's that we handle */ 893/** USB device ID's that we handle */
894static struct usb_device_id hwarc_id_table[] = { 894static const struct usb_device_id hwarc_id_table[] = {
895 /* D-Link DUB-1210 */ 895 /* D-Link DUB-1210 */
896 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), 896 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
897 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, 897 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 0bb665a0c024..a99e211a1b87 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -120,8 +120,7 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
120 result = usb_control_msg( 120 result = usb_control_msg(
121 i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), 121 i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
122 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 122 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
123 cpu_to_le16(memory_address & 0xffff), 123 memory_address, (memory_address >> 16),
124 cpu_to_le16((memory_address >> 16) & 0xffff),
125 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); 124 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
126 if (result < 0) 125 if (result < 0)
127 break; 126 break;
@@ -166,8 +165,7 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
166 result = usb_control_msg( 165 result = usb_control_msg(
167 i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0), 166 i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
168 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 167 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
169 cpu_to_le16(itr_addr & 0xffff), 168 itr_addr, (itr_addr >> 16),
170 cpu_to_le16((itr_addr >> 16) & 0xffff),
171 i1480->cmd_buf + itr, itr_size, 169 i1480->cmd_buf + itr, itr_size,
172 100 /* FIXME: arbitrary */); 170 100 /* FIXME: arbitrary */);
173 if (result < 0) { 171 if (result < 0) {
@@ -413,6 +411,10 @@ error:
413 return result; 411 return result;
414} 412}
415 413
414MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
415MODULE_FIRMWARE("i1480-usb-0.0.bin");
416MODULE_FIRMWARE("i1480-phy-0.0.bin");
417
416#define i1480_USB_DEV(v, p) \ 418#define i1480_USB_DEV(v, p) \
417{ \ 419{ \
418 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ 420 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
@@ -430,7 +432,7 @@ error:
430 432
431 433
432/** USB device ID's that we handle */ 434/** USB device ID's that we handle */
433static struct usb_device_id i1480_usb_id_table[] = { 435static const struct usb_device_id i1480_usb_id_table[] = {
434 i1480_USB_DEV(0x8086, 0xdf3b), 436 i1480_USB_DEV(0x8086, 0xdf3b),
435 i1480_USB_DEV(0x15a9, 0x0005), 437 i1480_USB_DEV(0x15a9, 0x0005),
436 i1480_USB_DEV(0x07d1, 0x3802), 438 i1480_USB_DEV(0x07d1, 0x3802),
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
index aa42fcee4c4f..75164866c2d8 100644
--- a/drivers/uwb/wlp/messages.c
+++ b/drivers/uwb/wlp/messages.c
@@ -259,6 +259,63 @@ out:
259} 259}
260 260
261 261
262static ssize_t wlp_get_attribute(struct wlp *wlp, u16 type_code,
263 struct wlp_attr_hdr *attr_hdr, void *value, ssize_t value_len,
264 ssize_t buflen)
265{
266 struct device *dev = &wlp->rc->uwb_dev.dev;
267 ssize_t attr_len = sizeof(*attr_hdr) + value_len;
268 if (buflen < 0)
269 return -EINVAL;
270 if (buflen < attr_len) {
271 dev_err(dev, "WLP: Not enough space in buffer to parse"
272 " attribute field. Need %d, received %zu\n",
273 (int)attr_len, buflen);
274 return -EIO;
275 }
276 if (wlp_check_attr_hdr(wlp, attr_hdr, type_code, value_len) < 0) {
277 dev_err(dev, "WLP: Header verification failed. \n");
278 return -EINVAL;
279 }
280 memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), value_len);
281 return attr_len;
282}
283
284static ssize_t wlp_vget_attribute(struct wlp *wlp, u16 type_code,
285 struct wlp_attr_hdr *attr_hdr, void *value, ssize_t max_value_len,
286 ssize_t buflen)
287{
288 struct device *dev = &wlp->rc->uwb_dev.dev;
289 size_t len;
290 if (buflen < 0)
291 return -EINVAL;
292 if (buflen < sizeof(*attr_hdr)) {
293 dev_err(dev, "WLP: Not enough space in buffer to parse"
294 " header.\n");
295 return -EIO;
296 }
297 if (le16_to_cpu(attr_hdr->type) != type_code) {
298 dev_err(dev, "WLP: Unexpected attribute type. Got %u, "
299 "expected %u.\n", le16_to_cpu(attr_hdr->type),
300 type_code);
301 return -EINVAL;
302 }
303 len = le16_to_cpu(attr_hdr->length);
304 if (len > max_value_len) {
305 dev_err(dev, "WLP: Attribute larger than maximum "
306 "allowed. Received %zu, max is %d.\n", len,
307 (int)max_value_len);
308 return -EFBIG;
309 }
310 if (buflen < sizeof(*attr_hdr) + len) {
311 dev_err(dev, "WLP: Not enough space in buffer to parse "
312 "variable data.\n");
313 return -EIO;
314 }
315 memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), len);
316 return sizeof(*attr_hdr) + len;
317}
318
262/** 319/**
263 * Get value of attribute from fixed size attribute field. 320 * Get value of attribute from fixed size attribute field.
264 * 321 *
@@ -274,22 +331,8 @@ out:
274ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \ 331ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \
275 type *value, ssize_t buflen) \ 332 type *value, ssize_t buflen) \
276{ \ 333{ \
277 struct device *dev = &wlp->rc->uwb_dev.dev; \ 334 return wlp_get_attribute(wlp, (type_code), &attr->hdr, \
278 if (buflen < 0) \ 335 value, sizeof(*value), buflen); \
279 return -EINVAL; \
280 if (buflen < sizeof(*attr)) { \
281 dev_err(dev, "WLP: Not enough space in buffer to parse" \
282 " attribute field. Need %d, received %zu\n", \
283 (int)sizeof(*attr), buflen); \
284 return -EIO; \
285 } \
286 if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \
287 sizeof(attr->name)) < 0) { \
288 dev_err(dev, "WLP: Header verification failed. \n"); \
289 return -EINVAL; \
290 } \
291 *value = attr->name; \
292 return sizeof(*attr); \
293} 336}
294 337
295#define wlp_get_sparse(type, type_code, name) \ 338#define wlp_get_sparse(type, type_code, name) \
@@ -313,35 +356,8 @@ static ssize_t wlp_get_##name(struct wlp *wlp, \
313 struct wlp_attr_##name *attr, \ 356 struct wlp_attr_##name *attr, \
314 type_val *value, ssize_t buflen) \ 357 type_val *value, ssize_t buflen) \
315{ \ 358{ \
316 struct device *dev = &wlp->rc->uwb_dev.dev; \ 359 return wlp_vget_attribute(wlp, (type_code), &attr->hdr, \
317 size_t len; \ 360 value, (max), buflen); \
318 if (buflen < 0) \
319 return -EINVAL; \
320 if (buflen < sizeof(*attr)) { \
321 dev_err(dev, "WLP: Not enough space in buffer to parse" \
322 " header.\n"); \
323 return -EIO; \
324 } \
325 if (le16_to_cpu(attr->hdr.type) != type_code) { \
326 dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \
327 "expected %u.\n", le16_to_cpu(attr->hdr.type), \
328 type_code); \
329 return -EINVAL; \
330 } \
331 len = le16_to_cpu(attr->hdr.length); \
332 if (len > max) { \
333 dev_err(dev, "WLP: Attribute larger than maximum " \
334 "allowed. Received %zu, max is %d.\n", len, \
335 (int)max); \
336 return -EFBIG; \
337 } \
338 if (buflen < sizeof(*attr) + len) { \
339 dev_err(dev, "WLP: Not enough space in buffer to parse "\
340 "variable data.\n"); \
341 return -EIO; \
342 } \
343 memcpy(value, (void *) attr + sizeof(*attr), len); \
344 return sizeof(*attr) + len; \
345} 361}
346 362
347wlp_get(u8, WLP_ATTR_WLP_VER, version) 363wlp_get(u8, WLP_ATTR_WLP_VER, version)
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index dabe804ba575..6e16244f3ed1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -914,7 +914,7 @@ config FB_XVR2500
914 914
915config FB_XVR1000 915config FB_XVR1000
916 bool "Sun XVR-1000 support" 916 bool "Sun XVR-1000 support"
917 depends on SPARC64 917 depends on (FB = y) && SPARC64
918 select FB_CFB_FILLRECT 918 select FB_CFB_FILLRECT
919 select FB_CFB_COPYAREA 919 select FB_CFB_COPYAREA
920 select FB_CFB_IMAGEBLIT 920 select FB_CFB_IMAGEBLIT
@@ -1881,7 +1881,7 @@ config FB_W100
1881 1881
1882config FB_SH_MOBILE_LCDC 1882config FB_SH_MOBILE_LCDC
1883 tristate "SuperH Mobile LCDC framebuffer support" 1883 tristate "SuperH Mobile LCDC framebuffer support"
1884 depends on FB && SUPERH && HAVE_CLK 1884 depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
1885 select FB_SYS_FILLRECT 1885 select FB_SYS_FILLRECT
1886 select FB_SYS_COPYAREA 1886 select FB_SYS_COPYAREA
1887 select FB_SYS_IMAGEBLIT 1887 select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index a21efcd10b78..afe21e6eb544 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -65,16 +65,16 @@ static void clcdfb_disable(struct clcd_fb *fb)
65 if (fb->board->disable) 65 if (fb->board->disable)
66 fb->board->disable(fb); 66 fb->board->disable(fb);
67 67
68 val = readl(fb->regs + CLCD_CNTL); 68 val = readl(fb->regs + fb->off_cntl);
69 if (val & CNTL_LCDPWR) { 69 if (val & CNTL_LCDPWR) {
70 val &= ~CNTL_LCDPWR; 70 val &= ~CNTL_LCDPWR;
71 writel(val, fb->regs + CLCD_CNTL); 71 writel(val, fb->regs + fb->off_cntl);
72 72
73 clcdfb_sleep(20); 73 clcdfb_sleep(20);
74 } 74 }
75 if (val & CNTL_LCDEN) { 75 if (val & CNTL_LCDEN) {
76 val &= ~CNTL_LCDEN; 76 val &= ~CNTL_LCDEN;
77 writel(val, fb->regs + CLCD_CNTL); 77 writel(val, fb->regs + fb->off_cntl);
78 } 78 }
79 79
80 /* 80 /*
@@ -94,7 +94,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
94 * Bring up by first enabling.. 94 * Bring up by first enabling..
95 */ 95 */
96 cntl |= CNTL_LCDEN; 96 cntl |= CNTL_LCDEN;
97 writel(cntl, fb->regs + CLCD_CNTL); 97 writel(cntl, fb->regs + fb->off_cntl);
98 98
99 clcdfb_sleep(20); 99 clcdfb_sleep(20);
100 100
@@ -102,7 +102,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
102 * and now apply power. 102 * and now apply power.
103 */ 103 */
104 cntl |= CNTL_LCDPWR; 104 cntl |= CNTL_LCDPWR;
105 writel(cntl, fb->regs + CLCD_CNTL); 105 writel(cntl, fb->regs + fb->off_cntl);
106 106
107 /* 107 /*
108 * finally, enable the interface. 108 * finally, enable the interface.
@@ -233,7 +233,7 @@ static int clcdfb_set_par(struct fb_info *info)
233 readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1), 233 readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1),
234 readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3), 234 readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3),
235 readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS), 235 readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS),
236 readl(fb->regs + CLCD_IENB), readl(fb->regs + CLCD_CNTL)); 236 readl(fb->regs + fb->off_ienb), readl(fb->regs + fb->off_cntl));
237#endif 237#endif
238 238
239 return 0; 239 return 0;
@@ -345,6 +345,23 @@ static int clcdfb_register(struct clcd_fb *fb)
345{ 345{
346 int ret; 346 int ret;
347 347
348 /*
349 * ARM PL111 always has IENB at 0x1c; it's only PL110
350 * which is reversed on some platforms.
351 */
352 if (amba_manf(fb->dev) == 0x41 && amba_part(fb->dev) == 0x111) {
353 fb->off_ienb = CLCD_PL111_IENB;
354 fb->off_cntl = CLCD_PL111_CNTL;
355 } else {
356#ifdef CONFIG_ARCH_VERSATILE
357 fb->off_ienb = CLCD_PL111_IENB;
358 fb->off_cntl = CLCD_PL111_CNTL;
359#else
360 fb->off_ienb = CLCD_PL110_IENB;
361 fb->off_cntl = CLCD_PL110_CNTL;
362#endif
363 }
364
348 fb->clk = clk_get(&fb->dev->dev, NULL); 365 fb->clk = clk_get(&fb->dev->dev, NULL);
349 if (IS_ERR(fb->clk)) { 366 if (IS_ERR(fb->clk)) {
350 ret = PTR_ERR(fb->clk); 367 ret = PTR_ERR(fb->clk);
@@ -416,7 +433,7 @@ static int clcdfb_register(struct clcd_fb *fb)
416 /* 433 /*
417 * Ensure interrupts are disabled. 434 * Ensure interrupts are disabled.
418 */ 435 */
419 writel(0, fb->regs + CLCD_IENB); 436 writel(0, fb->regs + fb->off_ienb);
420 437
421 fb_set_var(&fb->fb, &fb->fb.var); 438 fb_set_var(&fb->fb, &fb->fb.var);
422 439
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 3d886c6902f9..11de3bfd4e54 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -117,6 +117,7 @@ static struct backlight_ops atmel_lcdc_bl_ops = {
117 117
118static void init_backlight(struct atmel_lcdfb_info *sinfo) 118static void init_backlight(struct atmel_lcdfb_info *sinfo)
119{ 119{
120 struct backlight_properties props;
120 struct backlight_device *bl; 121 struct backlight_device *bl;
121 122
122 sinfo->bl_power = FB_BLANK_UNBLANK; 123 sinfo->bl_power = FB_BLANK_UNBLANK;
@@ -124,8 +125,10 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
124 if (sinfo->backlight) 125 if (sinfo->backlight)
125 return; 126 return;
126 127
127 bl = backlight_device_register("backlight", &sinfo->pdev->dev, 128 memset(&props, 0, sizeof(struct backlight_properties));
128 sinfo, &atmel_lcdc_bl_ops); 129 props.max_brightness = 0xff;
130 bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo,
131 &atmel_lcdc_bl_ops, &props);
129 if (IS_ERR(bl)) { 132 if (IS_ERR(bl)) {
130 dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n", 133 dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
131 PTR_ERR(bl)); 134 PTR_ERR(bl));
@@ -135,7 +138,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
135 138
136 bl->props.power = FB_BLANK_UNBLANK; 139 bl->props.power = FB_BLANK_UNBLANK;
137 bl->props.fb_blank = FB_BLANK_UNBLANK; 140 bl->props.fb_blank = FB_BLANK_UNBLANK;
138 bl->props.max_brightness = 0xff;
139 bl->props.brightness = atmel_bl_get_brightness(bl); 141 bl->props.brightness = atmel_bl_get_brightness(bl);
140} 142}
141 143
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 9ee67d6da710..a489be0c4614 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1802,6 +1802,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power)
1802 1802
1803static void aty128_bl_init(struct aty128fb_par *par) 1803static void aty128_bl_init(struct aty128fb_par *par)
1804{ 1804{
1805 struct backlight_properties props;
1805 struct fb_info *info = pci_get_drvdata(par->pdev); 1806 struct fb_info *info = pci_get_drvdata(par->pdev);
1806 struct backlight_device *bd; 1807 struct backlight_device *bd;
1807 char name[12]; 1808 char name[12];
@@ -1817,7 +1818,10 @@ static void aty128_bl_init(struct aty128fb_par *par)
1817 1818
1818 snprintf(name, sizeof(name), "aty128bl%d", info->node); 1819 snprintf(name, sizeof(name), "aty128bl%d", info->node);
1819 1820
1820 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data); 1821 memset(&props, 0, sizeof(struct backlight_properties));
1822 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
1823 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
1824 &props);
1821 if (IS_ERR(bd)) { 1825 if (IS_ERR(bd)) {
1822 info->bl_dev = NULL; 1826 info->bl_dev = NULL;
1823 printk(KERN_WARNING "aty128: Backlight registration failed\n"); 1827 printk(KERN_WARNING "aty128: Backlight registration failed\n");
@@ -1829,7 +1833,6 @@ static void aty128_bl_init(struct aty128fb_par *par)
1829 63 * FB_BACKLIGHT_MAX / MAX_LEVEL, 1833 63 * FB_BACKLIGHT_MAX / MAX_LEVEL,
1830 219 * FB_BACKLIGHT_MAX / MAX_LEVEL); 1834 219 * FB_BACKLIGHT_MAX / MAX_LEVEL);
1831 1835
1832 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
1833 bd->props.brightness = bd->props.max_brightness; 1836 bd->props.brightness = bd->props.max_brightness;
1834 bd->props.power = FB_BLANK_UNBLANK; 1837 bd->props.power = FB_BLANK_UNBLANK;
1835 backlight_update_status(bd); 1838 backlight_update_status(bd);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index e45ab8db2ddc..29d72851f85b 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2232,6 +2232,7 @@ static struct backlight_ops aty_bl_data = {
2232 2232
2233static void aty_bl_init(struct atyfb_par *par) 2233static void aty_bl_init(struct atyfb_par *par)
2234{ 2234{
2235 struct backlight_properties props;
2235 struct fb_info *info = pci_get_drvdata(par->pdev); 2236 struct fb_info *info = pci_get_drvdata(par->pdev);
2236 struct backlight_device *bd; 2237 struct backlight_device *bd;
2237 char name[12]; 2238 char name[12];
@@ -2243,7 +2244,10 @@ static void aty_bl_init(struct atyfb_par *par)
2243 2244
2244 snprintf(name, sizeof(name), "atybl%d", info->node); 2245 snprintf(name, sizeof(name), "atybl%d", info->node);
2245 2246
2246 bd = backlight_device_register(name, info->dev, par, &aty_bl_data); 2247 memset(&props, 0, sizeof(struct backlight_properties));
2248 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
2249 bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
2250 &props);
2247 if (IS_ERR(bd)) { 2251 if (IS_ERR(bd)) {
2248 info->bl_dev = NULL; 2252 info->bl_dev = NULL;
2249 printk(KERN_WARNING "aty: Backlight registration failed\n"); 2253 printk(KERN_WARNING "aty: Backlight registration failed\n");
@@ -2255,7 +2259,6 @@ static void aty_bl_init(struct atyfb_par *par)
2255 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL, 2259 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
2256 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL); 2260 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
2257 2261
2258 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
2259 bd->props.brightness = bd->props.max_brightness; 2262 bd->props.brightness = bd->props.max_brightness;
2260 bd->props.power = FB_BLANK_UNBLANK; 2263 bd->props.power = FB_BLANK_UNBLANK;
2261 backlight_update_status(bd); 2264 backlight_update_status(bd);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index fa1198c4ccc5..9fc8c66be3ce 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -134,6 +134,7 @@ static struct backlight_ops radeon_bl_data = {
134 134
135void radeonfb_bl_init(struct radeonfb_info *rinfo) 135void radeonfb_bl_init(struct radeonfb_info *rinfo)
136{ 136{
137 struct backlight_properties props;
137 struct backlight_device *bd; 138 struct backlight_device *bd;
138 struct radeon_bl_privdata *pdata; 139 struct radeon_bl_privdata *pdata;
139 char name[12]; 140 char name[12];
@@ -155,7 +156,10 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
155 156
156 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node); 157 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
157 158
158 bd = backlight_device_register(name, rinfo->info->dev, pdata, &radeon_bl_data); 159 memset(&props, 0, sizeof(struct backlight_properties));
160 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
161 bd = backlight_device_register(name, rinfo->info->dev, pdata,
162 &radeon_bl_data, &props);
159 if (IS_ERR(bd)) { 163 if (IS_ERR(bd)) {
160 rinfo->info->bl_dev = NULL; 164 rinfo->info->bl_dev = NULL;
161 printk("radeonfb: Backlight registration failed\n"); 165 printk("radeonfb: Backlight registration failed\n");
@@ -185,7 +189,6 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
185 63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL, 189 63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL,
186 217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL); 190 217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);
187 191
188 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
189 bd->props.brightness = bd->props.max_brightness; 192 bd->props.brightness = bd->props.max_brightness;
190 bd->props.power = FB_BLANK_UNBLANK; 193 bd->props.power = FB_BLANK_UNBLANK;
191 backlight_update_status(bd); 194 backlight_update_status(bd);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b8f705cca438..93e25c77aeb2 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -187,6 +187,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
187 struct pm860x_backlight_data *data; 187 struct pm860x_backlight_data *data;
188 struct backlight_device *bl; 188 struct backlight_device *bl;
189 struct resource *res; 189 struct resource *res;
190 struct backlight_properties props;
190 unsigned char value; 191 unsigned char value;
191 char name[MFD_NAME_SIZE]; 192 char name[MFD_NAME_SIZE];
192 int ret; 193 int ret;
@@ -223,14 +224,15 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
223 return -EINVAL; 224 return -EINVAL;
224 } 225 }
225 226
227 memset(&props, 0, sizeof(struct backlight_properties));
228 props.max_brightness = MAX_BRIGHTNESS;
226 bl = backlight_device_register(name, &pdev->dev, data, 229 bl = backlight_device_register(name, &pdev->dev, data,
227 &pm860x_backlight_ops); 230 &pm860x_backlight_ops, &props);
228 if (IS_ERR(bl)) { 231 if (IS_ERR(bl)) {
229 dev_err(&pdev->dev, "failed to register backlight\n"); 232 dev_err(&pdev->dev, "failed to register backlight\n");
230 kfree(data); 233 kfree(data);
231 return PTR_ERR(bl); 234 return PTR_ERR(bl);
232 } 235 }
233 bl->props.max_brightness = MAX_BRIGHTNESS;
234 bl->props.brightness = MAX_BRIGHTNESS; 236 bl->props.brightness = MAX_BRIGHTNESS;
235 237
236 platform_set_drvdata(pdev, bl); 238 platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c77fc610212..c025c84601b0 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@ config LCD_CORGI
31 Say y here to support the LCD panels usually found on SHARP 31 Say y here to support the LCD panels usually found on SHARP
32 corgi (C7x0) and spitz (Cxx00) models. 32 corgi (C7x0) and spitz (Cxx00) models.
33 33
34config LCD_L4F00242T03
35 tristate "Epson L4F00242T03 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
37 help
38 SPI driver for Epson L4F00242T03. This provides basic support
39 for init and powering the LCD up/down through a sysfs interface.
40
34config LCD_LMS283GF05 41config LCD_LMS283GF05
35 tristate "Samsung LMS283GF05 LCD" 42 tristate "Samsung LMS283GF05 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO 43 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 6c704d41462d..09d1f14d6257 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o 4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o 5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
6obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
6obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o 7obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
7obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 8obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
8obj-$(CONFIG_LCD_ILI9320) += ili9320.o 9obj-$(CONFIG_LCD_ILI9320) += ili9320.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 86d95c228adb..5183f0e4d314 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -278,6 +278,7 @@ static const struct attribute_group adp5520_bl_attr_group = {
278 278
279static int __devinit adp5520_bl_probe(struct platform_device *pdev) 279static int __devinit adp5520_bl_probe(struct platform_device *pdev)
280{ 280{
281 struct backlight_properties props;
281 struct backlight_device *bl; 282 struct backlight_device *bl;
282 struct adp5520_bl *data; 283 struct adp5520_bl *data;
283 int ret = 0; 284 int ret = 0;
@@ -300,17 +301,17 @@ static int __devinit adp5520_bl_probe(struct platform_device *pdev)
300 301
301 mutex_init(&data->lock); 302 mutex_init(&data->lock);
302 303
303 bl = backlight_device_register(pdev->name, data->master, 304 memset(&props, 0, sizeof(struct backlight_properties));
304 data, &adp5520_bl_ops); 305 props.max_brightness = ADP5020_MAX_BRIGHTNESS;
306 bl = backlight_device_register(pdev->name, data->master, data,
307 &adp5520_bl_ops, &props);
305 if (IS_ERR(bl)) { 308 if (IS_ERR(bl)) {
306 dev_err(&pdev->dev, "failed to register backlight\n"); 309 dev_err(&pdev->dev, "failed to register backlight\n");
307 kfree(data); 310 kfree(data);
308 return PTR_ERR(bl); 311 return PTR_ERR(bl);
309 } 312 }
310 313
311 bl->props.max_brightness = 314 bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
312 bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
313
314 if (data->pdata->en_ambl_sens) 315 if (data->pdata->en_ambl_sens)
315 ret = sysfs_create_group(&bl->dev.kobj, 316 ret = sysfs_create_group(&bl->dev.kobj,
316 &adp5520_bl_attr_group); 317 &adp5520_bl_attr_group);
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index d769b0bab21a..b0624b983889 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -56,7 +56,7 @@ static int adx_backlight_get_brightness(struct backlight_device *bldev)
56 return brightness & 0xff; 56 return brightness & 0xff;
57} 57}
58 58
59static int adx_backlight_check_fb(struct fb_info *fb) 59static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
60{ 60{
61 return 1; 61 return 1;
62} 62}
@@ -70,6 +70,7 @@ static const struct backlight_ops adx_backlight_ops = {
70 70
71static int __devinit adx_backlight_probe(struct platform_device *pdev) 71static int __devinit adx_backlight_probe(struct platform_device *pdev)
72{ 72{
73 struct backlight_properties props;
73 struct backlight_device *bldev; 74 struct backlight_device *bldev;
74 struct resource *res; 75 struct resource *res;
75 struct adxbl *bl; 76 struct adxbl *bl;
@@ -101,14 +102,15 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
101 goto out; 102 goto out;
102 } 103 }
103 104
104 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl, 105 memset(&props, 0, sizeof(struct backlight_properties));
105 &adx_backlight_ops); 106 props.max_brightness = 0xff;
107 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
108 bl, &adx_backlight_ops, &props);
106 if (!bldev) { 109 if (!bldev) {
107 ret = -ENOMEM; 110 ret = -ENOMEM;
108 goto out; 111 goto out;
109 } 112 }
110 113
111 bldev->props.max_brightness = 0xff;
112 bldev->props.brightness = 0xff; 114 bldev->props.brightness = 0xff;
113 bldev->props.power = FB_BLANK_UNBLANK; 115 bldev->props.power = FB_BLANK_UNBLANK;
114 116
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index f625ffc69ad3..2d9760551a4b 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -120,6 +120,7 @@ static const struct backlight_ops atmel_pwm_bl_ops = {
120 120
121static int atmel_pwm_bl_probe(struct platform_device *pdev) 121static int atmel_pwm_bl_probe(struct platform_device *pdev)
122{ 122{
123 struct backlight_properties props;
123 const struct atmel_pwm_bl_platform_data *pdata; 124 const struct atmel_pwm_bl_platform_data *pdata;
124 struct backlight_device *bldev; 125 struct backlight_device *bldev;
125 struct atmel_pwm_bl *pwmbl; 126 struct atmel_pwm_bl *pwmbl;
@@ -165,8 +166,10 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
165 goto err_free_gpio; 166 goto err_free_gpio;
166 } 167 }
167 168
168 bldev = backlight_device_register("atmel-pwm-bl", 169 memset(&props, 0, sizeof(struct backlight_properties));
169 &pdev->dev, pwmbl, &atmel_pwm_bl_ops); 170 props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
171 bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
172 &atmel_pwm_bl_ops, &props);
170 if (IS_ERR(bldev)) { 173 if (IS_ERR(bldev)) {
171 retval = PTR_ERR(bldev); 174 retval = PTR_ERR(bldev);
172 goto err_free_gpio; 175 goto err_free_gpio;
@@ -178,7 +181,6 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
178 181
179 /* Power up the backlight by default at middle intesity. */ 182 /* Power up the backlight by default at middle intesity. */
180 bldev->props.power = FB_BLANK_UNBLANK; 183 bldev->props.power = FB_BLANK_UNBLANK;
181 bldev->props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
182 bldev->props.brightness = bldev->props.max_brightness / 2; 184 bldev->props.brightness = bldev->props.max_brightness / 2;
183 185
184 retval = atmel_pwm_bl_init_pwm(pwmbl); 186 retval = atmel_pwm_bl_init_pwm(pwmbl);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 18829cf68b1b..68bb838b9f11 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -38,7 +38,7 @@ static int fb_notifier_callback(struct notifier_block *self,
38 mutex_lock(&bd->ops_lock); 38 mutex_lock(&bd->ops_lock);
39 if (bd->ops) 39 if (bd->ops)
40 if (!bd->ops->check_fb || 40 if (!bd->ops->check_fb ||
41 bd->ops->check_fb(evdata->info)) { 41 bd->ops->check_fb(bd, evdata->info)) {
42 bd->props.fb_blank = *(int *)evdata->data; 42 bd->props.fb_blank = *(int *)evdata->data;
43 if (bd->props.fb_blank == FB_BLANK_UNBLANK) 43 if (bd->props.fb_blank == FB_BLANK_UNBLANK)
44 bd->props.state &= ~BL_CORE_FBBLANK; 44 bd->props.state &= ~BL_CORE_FBBLANK;
@@ -269,7 +269,8 @@ EXPORT_SYMBOL(backlight_force_update);
269 * ERR_PTR() or a pointer to the newly allocated device. 269 * ERR_PTR() or a pointer to the newly allocated device.
270 */ 270 */
271struct backlight_device *backlight_device_register(const char *name, 271struct backlight_device *backlight_device_register(const char *name,
272 struct device *parent, void *devdata, const struct backlight_ops *ops) 272 struct device *parent, void *devdata, const struct backlight_ops *ops,
273 const struct backlight_properties *props)
273{ 274{
274 struct backlight_device *new_bd; 275 struct backlight_device *new_bd;
275 int rc; 276 int rc;
@@ -289,6 +290,11 @@ struct backlight_device *backlight_device_register(const char *name,
289 dev_set_name(&new_bd->dev, name); 290 dev_set_name(&new_bd->dev, name);
290 dev_set_drvdata(&new_bd->dev, devdata); 291 dev_set_drvdata(&new_bd->dev, devdata);
291 292
293 /* Set default properties */
294 if (props)
295 memcpy(&new_bd->props, props,
296 sizeof(struct backlight_properties));
297
292 rc = device_register(&new_bd->dev); 298 rc = device_register(&new_bd->dev);
293 if (rc) { 299 if (rc) {
294 kfree(new_bd); 300 kfree(new_bd);
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index b4bcf8043797..73bdd8454c94 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -533,6 +533,7 @@ err_free_backlight_on:
533 533
534static int __devinit corgi_lcd_probe(struct spi_device *spi) 534static int __devinit corgi_lcd_probe(struct spi_device *spi)
535{ 535{
536 struct backlight_properties props;
536 struct corgi_lcd_platform_data *pdata = spi->dev.platform_data; 537 struct corgi_lcd_platform_data *pdata = spi->dev.platform_data;
537 struct corgi_lcd *lcd; 538 struct corgi_lcd *lcd;
538 int ret = 0; 539 int ret = 0;
@@ -559,13 +560,14 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
559 lcd->power = FB_BLANK_POWERDOWN; 560 lcd->power = FB_BLANK_POWERDOWN;
560 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; 561 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
561 562
562 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, 563 memset(&props, 0, sizeof(struct backlight_properties));
563 lcd, &corgi_bl_ops); 564 props.max_brightness = pdata->max_intensity;
565 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
566 &corgi_bl_ops, &props);
564 if (IS_ERR(lcd->bl_dev)) { 567 if (IS_ERR(lcd->bl_dev)) {
565 ret = PTR_ERR(lcd->bl_dev); 568 ret = PTR_ERR(lcd->bl_dev);
566 goto err_unregister_lcd; 569 goto err_unregister_lcd;
567 } 570 }
568 lcd->bl_dev->props.max_brightness = pdata->max_intensity;
569 lcd->bl_dev->props.brightness = pdata->default_intensity; 571 lcd->bl_dev->props.brightness = pdata->default_intensity;
570 lcd->bl_dev->props.power = FB_BLANK_UNBLANK; 572 lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
571 573
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index da86db4374a0..1cce6031bff2 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -170,6 +170,7 @@ static struct lcd_ops cr_lcd_ops = {
170 170
171static int cr_backlight_probe(struct platform_device *pdev) 171static int cr_backlight_probe(struct platform_device *pdev)
172{ 172{
173 struct backlight_properties props;
173 struct backlight_device *bdp; 174 struct backlight_device *bdp;
174 struct lcd_device *ldp; 175 struct lcd_device *ldp;
175 struct cr_panel *crp; 176 struct cr_panel *crp;
@@ -190,8 +191,9 @@ static int cr_backlight_probe(struct platform_device *pdev)
190 return -ENODEV; 191 return -ENODEV;
191 } 192 }
192 193
193 bdp = backlight_device_register("cr-backlight", 194 memset(&props, 0, sizeof(struct backlight_properties));
194 &pdev->dev, NULL, &cr_backlight_ops); 195 bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
196 &cr_backlight_ops, &props);
195 if (IS_ERR(bdp)) { 197 if (IS_ERR(bdp)) {
196 pci_dev_put(lpc_dev); 198 pci_dev_put(lpc_dev);
197 return PTR_ERR(bdp); 199 return PTR_ERR(bdp);
@@ -220,9 +222,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
220 crp->cr_lcd_device = ldp; 222 crp->cr_lcd_device = ldp;
221 crp->cr_backlight_device->props.power = FB_BLANK_UNBLANK; 223 crp->cr_backlight_device->props.power = FB_BLANK_UNBLANK;
222 crp->cr_backlight_device->props.brightness = 0; 224 crp->cr_backlight_device->props.brightness = 0;
223 crp->cr_backlight_device->props.max_brightness = 0;
224 cr_backlight_set_intensity(crp->cr_backlight_device); 225 cr_backlight_set_intensity(crp->cr_backlight_device);
225
226 cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_UNBLANK); 226 cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_UNBLANK);
227 227
228 platform_set_drvdata(pdev, crp); 228 platform_set_drvdata(pdev, crp);
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 74cdc640173d..686e4a789238 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -105,6 +105,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
105 struct da9034_backlight_pdata *pdata = pdev->dev.platform_data; 105 struct da9034_backlight_pdata *pdata = pdev->dev.platform_data;
106 struct da903x_backlight_data *data; 106 struct da903x_backlight_data *data;
107 struct backlight_device *bl; 107 struct backlight_device *bl;
108 struct backlight_properties props;
108 int max_brightness; 109 int max_brightness;
109 110
110 data = kzalloc(sizeof(*data), GFP_KERNEL); 111 data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -134,15 +135,15 @@ static int da903x_backlight_probe(struct platform_device *pdev)
134 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2, 135 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
135 DA9034_WLED_ISET(pdata->output_current)); 136 DA9034_WLED_ISET(pdata->output_current));
136 137
137 bl = backlight_device_register(pdev->name, data->da903x_dev, 138 props.max_brightness = max_brightness;
138 data, &da903x_backlight_ops); 139 bl = backlight_device_register(pdev->name, data->da903x_dev, data,
140 &da903x_backlight_ops, &props);
139 if (IS_ERR(bl)) { 141 if (IS_ERR(bl)) {
140 dev_err(&pdev->dev, "failed to register backlight\n"); 142 dev_err(&pdev->dev, "failed to register backlight\n");
141 kfree(data); 143 kfree(data);
142 return PTR_ERR(bl); 144 return PTR_ERR(bl);
143 } 145 }
144 146
145 bl->props.max_brightness = max_brightness;
146 bl->props.brightness = max_brightness; 147 bl->props.brightness = max_brightness;
147 148
148 platform_set_drvdata(pdev, bl); 149 platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index e6d348e63596..312ca619735d 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -78,6 +78,7 @@ static const struct backlight_ops genericbl_ops = {
78 78
79static int genericbl_probe(struct platform_device *pdev) 79static int genericbl_probe(struct platform_device *pdev)
80{ 80{
81 struct backlight_properties props;
81 struct generic_bl_info *machinfo = pdev->dev.platform_data; 82 struct generic_bl_info *machinfo = pdev->dev.platform_data;
82 const char *name = "generic-bl"; 83 const char *name = "generic-bl";
83 struct backlight_device *bd; 84 struct backlight_device *bd;
@@ -89,14 +90,15 @@ static int genericbl_probe(struct platform_device *pdev)
89 if (machinfo->name) 90 if (machinfo->name)
90 name = machinfo->name; 91 name = machinfo->name;
91 92
92 bd = backlight_device_register (name, 93 memset(&props, 0, sizeof(struct backlight_properties));
93 &pdev->dev, NULL, &genericbl_ops); 94 props.max_brightness = machinfo->max_intensity;
95 bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
96 &props);
94 if (IS_ERR (bd)) 97 if (IS_ERR (bd))
95 return PTR_ERR (bd); 98 return PTR_ERR (bd);
96 99
97 platform_set_drvdata(pdev, bd); 100 platform_set_drvdata(pdev, bd);
98 101
99 bd->props.max_brightness = machinfo->max_intensity;
100 bd->props.power = FB_BLANK_UNBLANK; 102 bd->props.power = FB_BLANK_UNBLANK;
101 bd->props.brightness = machinfo->default_intensity; 103 bd->props.brightness = machinfo->default_intensity;
102 backlight_update_status(bd); 104 backlight_update_status(bd);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index f7cc528d5be7..267d23f8d645 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -105,16 +105,18 @@ static const struct backlight_ops hp680bl_ops = {
105 105
106static int __devinit hp680bl_probe(struct platform_device *pdev) 106static int __devinit hp680bl_probe(struct platform_device *pdev)
107{ 107{
108 struct backlight_properties props;
108 struct backlight_device *bd; 109 struct backlight_device *bd;
109 110
110 bd = backlight_device_register ("hp680-bl", &pdev->dev, NULL, 111 memset(&props, 0, sizeof(struct backlight_properties));
111 &hp680bl_ops); 112 props.max_brightness = HP680_MAX_INTENSITY;
113 bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
114 &hp680bl_ops, &props);
112 if (IS_ERR(bd)) 115 if (IS_ERR(bd))
113 return PTR_ERR(bd); 116 return PTR_ERR(bd);
114 117
115 platform_set_drvdata(pdev, bd); 118 platform_set_drvdata(pdev, bd);
116 119
117 bd->props.max_brightness = HP680_MAX_INTENSITY;
118 bd->props.brightness = HP680_DEFAULT_INTENSITY; 120 bd->props.brightness = HP680_DEFAULT_INTENSITY;
119 hp680bl_send_intensity(bd); 121 hp680bl_send_intensity(bd);
120 122
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index db9071fc5665..2f177b3a4885 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -101,10 +101,14 @@ static const struct backlight_ops jornada_bl_ops = {
101 101
102static int jornada_bl_probe(struct platform_device *pdev) 102static int jornada_bl_probe(struct platform_device *pdev)
103{ 103{
104 struct backlight_properties props;
104 int ret; 105 int ret;
105 struct backlight_device *bd; 106 struct backlight_device *bd;
106 107
107 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_bl_ops); 108 memset(&props, 0, sizeof(struct backlight_properties));
109 props.max_brightness = BL_MAX_BRIGHT;
110 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
111 &jornada_bl_ops, &props);
108 112
109 if (IS_ERR(bd)) { 113 if (IS_ERR(bd)) {
110 ret = PTR_ERR(bd); 114 ret = PTR_ERR(bd);
@@ -117,7 +121,6 @@ static int jornada_bl_probe(struct platform_device *pdev)
117 /* note. make sure max brightness is set otherwise 121 /* note. make sure max brightness is set otherwise
118 you will get seemingly non-related errors when 122 you will get seemingly non-related errors when
119 trying to change brightness */ 123 trying to change brightness */
120 bd->props.max_brightness = BL_MAX_BRIGHT;
121 jornada_bl_update_status(bd); 124 jornada_bl_update_status(bd);
122 125
123 platform_set_drvdata(pdev, bd); 126 platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 939e7b830cf3..f439a8632287 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -141,20 +141,24 @@ static const struct backlight_ops kb3886bl_ops = {
141 141
142static int kb3886bl_probe(struct platform_device *pdev) 142static int kb3886bl_probe(struct platform_device *pdev)
143{ 143{
144 struct backlight_properties props;
144 struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data; 145 struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
145 146
146 bl_machinfo = machinfo; 147 bl_machinfo = machinfo;
147 if (!machinfo->limit_mask) 148 if (!machinfo->limit_mask)
148 machinfo->limit_mask = -1; 149 machinfo->limit_mask = -1;
149 150
151 memset(&props, 0, sizeof(struct backlight_properties));
152 props.max_brightness = machinfo->max_intensity;
150 kb3886_backlight_device = backlight_device_register("kb3886-bl", 153 kb3886_backlight_device = backlight_device_register("kb3886-bl",
151 &pdev->dev, NULL, &kb3886bl_ops); 154 &pdev->dev, NULL,
155 &kb3886bl_ops,
156 &props);
152 if (IS_ERR(kb3886_backlight_device)) 157 if (IS_ERR(kb3886_backlight_device))
153 return PTR_ERR(kb3886_backlight_device); 158 return PTR_ERR(kb3886_backlight_device);
154 159
155 platform_set_drvdata(pdev, kb3886_backlight_device); 160 platform_set_drvdata(pdev, kb3886_backlight_device);
156 161
157 kb3886_backlight_device->props.max_brightness = machinfo->max_intensity;
158 kb3886_backlight_device->props.power = FB_BLANK_UNBLANK; 162 kb3886_backlight_device->props.power = FB_BLANK_UNBLANK;
159 kb3886_backlight_device->props.brightness = machinfo->default_intensity; 163 kb3886_backlight_device->props.brightness = machinfo->default_intensity;
160 backlight_update_status(kb3886_backlight_device); 164 backlight_update_status(kb3886_backlight_device);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
new file mode 100644
index 000000000000..74abd6994b09
--- /dev/null
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -0,0 +1,257 @@
1/*
2 * l4f00242t03.c -- support for Epson L4F00242T03 LCD
3 *
4 * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
5 *
6 * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
7 * Inspired by Marek Vasut work in l4f00242t03.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/device.h>
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/gpio.h>
18#include <linux/lcd.h>
19#include <linux/regulator/consumer.h>
20
21#include <linux/spi/spi.h>
22#include <linux/spi/l4f00242t03.h>
23
24struct l4f00242t03_priv {
25 struct spi_device *spi;
26 struct lcd_device *ld;
27 int lcd_on:1;
28 struct regulator *io_reg;
29 struct regulator *core_reg;
30};
31
32
33static void l4f00242t03_reset(unsigned int gpio)
34{
35 pr_debug("l4f00242t03_reset.\n");
36 gpio_set_value(gpio, 1);
37 mdelay(100);
38 gpio_set_value(gpio, 0);
39 mdelay(10); /* tRES >= 100us */
40 gpio_set_value(gpio, 1);
41 mdelay(20);
42}
43
44#define param(x) ((x) | 0x100)
45
46static void l4f00242t03_lcd_init(struct spi_device *spi)
47{
48 struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
49 struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
50 const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
51
52 dev_dbg(&spi->dev, "initializing LCD\n");
53
54 if (priv->io_reg) {
55 regulator_set_voltage(priv->io_reg, 1800000, 1800000);
56 regulator_enable(priv->io_reg);
57 }
58
59 if (priv->core_reg) {
60 regulator_set_voltage(priv->core_reg, 2800000, 2800000);
61 regulator_enable(priv->core_reg);
62 }
63
64 gpio_set_value(pdata->data_enable_gpio, 1);
65 msleep(60);
66 spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
67}
68
69static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
70{
71 struct l4f00242t03_priv *priv = lcd_get_data(ld);
72 struct spi_device *spi = priv->spi;
73
74 const u16 slpout = 0x11;
75 const u16 dison = 0x29;
76
77 const u16 slpin = 0x10;
78 const u16 disoff = 0x28;
79
80 if (power) {
81 if (priv->lcd_on)
82 return 0;
83
84 dev_dbg(&spi->dev, "turning on LCD\n");
85
86 spi_write(spi, (const u8 *)&slpout, sizeof(u16));
87 msleep(60);
88 spi_write(spi, (const u8 *)&dison, sizeof(u16));
89
90 priv->lcd_on = 1;
91 } else {
92 if (!priv->lcd_on)
93 return 0;
94
95 dev_dbg(&spi->dev, "turning off LCD\n");
96
97 spi_write(spi, (const u8 *)&disoff, sizeof(u16));
98 msleep(60);
99 spi_write(spi, (const u8 *)&slpin, sizeof(u16));
100
101 priv->lcd_on = 0;
102 }
103
104 return 0;
105}
106
107static struct lcd_ops l4f_ops = {
108 .set_power = l4f00242t03_lcd_power_set,
109 .get_power = NULL,
110};
111
112static int __devinit l4f00242t03_probe(struct spi_device *spi)
113{
114 struct l4f00242t03_priv *priv;
115 struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
116 int ret;
117
118 if (pdata == NULL) {
119 dev_err(&spi->dev, "Uninitialized platform data.\n");
120 return -EINVAL;
121 }
122
123 priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
124
125 if (priv == NULL) {
126 dev_err(&spi->dev, "No memory for this device.\n");
127 ret = -ENOMEM;
128 goto err;
129 }
130
131 dev_set_drvdata(&spi->dev, priv);
132 spi->bits_per_word = 9;
133 spi_setup(spi);
134
135 priv->spi = spi;
136
137 ret = gpio_request(pdata->reset_gpio, "lcd l4f00242t03 reset");
138 if (ret) {
139 dev_err(&spi->dev,
140 "Unable to get the lcd l4f00242t03 reset gpio.\n");
141 return ret;
142 }
143
144 ret = gpio_direction_output(pdata->reset_gpio, 1);
145 if (ret)
146 goto err2;
147
148 ret = gpio_request(pdata->data_enable_gpio,
149 "lcd l4f00242t03 data enable");
150 if (ret) {
151 dev_err(&spi->dev,
152 "Unable to get the lcd l4f00242t03 data en gpio.\n");
153 return ret;
154 }
155
156 ret = gpio_direction_output(pdata->data_enable_gpio, 0);
157 if (ret)
158 goto err3;
159
160 if (pdata->io_supply) {
161 priv->io_reg = regulator_get(NULL, pdata->io_supply);
162
163 if (IS_ERR(priv->io_reg)) {
164 pr_err("%s: Unable to get the IO regulator\n",
165 __func__);
166 goto err3;
167 }
168 }
169
170 if (pdata->core_supply) {
171 priv->core_reg = regulator_get(NULL, pdata->core_supply);
172
173 if (IS_ERR(priv->core_reg)) {
174 pr_err("%s: Unable to get the core regulator\n",
175 __func__);
176 goto err4;
177 }
178 }
179
180 priv->ld = lcd_device_register("l4f00242t03",
181 &spi->dev, priv, &l4f_ops);
182 if (IS_ERR(priv->ld)) {
183 ret = PTR_ERR(priv->ld);
184 goto err5;
185 }
186
187 /* Init the LCD */
188 l4f00242t03_reset(pdata->reset_gpio);
189 l4f00242t03_lcd_init(spi);
190 l4f00242t03_lcd_power_set(priv->ld, 1);
191
192 dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
193
194 return 0;
195
196err5:
197 if (priv->core_reg)
198 regulator_put(priv->core_reg);
199err4:
200 if (priv->io_reg)
201 regulator_put(priv->io_reg);
202err3:
203 gpio_free(pdata->data_enable_gpio);
204err2:
205 gpio_free(pdata->reset_gpio);
206err:
207 kfree(priv);
208
209 return ret;
210}
211
212static int __devexit l4f00242t03_remove(struct spi_device *spi)
213{
214 struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
215 struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
216
217 l4f00242t03_lcd_power_set(priv->ld, 0);
218 lcd_device_unregister(priv->ld);
219
220 gpio_free(pdata->data_enable_gpio);
221 gpio_free(pdata->reset_gpio);
222
223 if (priv->io_reg)
224 regulator_put(priv->core_reg);
225 if (priv->core_reg)
226 regulator_put(priv->io_reg);
227
228 kfree(priv);
229
230 return 0;
231}
232
233static struct spi_driver l4f00242t03_driver = {
234 .driver = {
235 .name = "l4f00242t03",
236 .owner = THIS_MODULE,
237 },
238 .probe = l4f00242t03_probe,
239 .remove = __devexit_p(l4f00242t03_remove),
240};
241
242static __init int l4f00242t03_init(void)
243{
244 return spi_register_driver(&l4f00242t03_driver);
245}
246
247static __exit void l4f00242t03_exit(void)
248{
249 spi_unregister_driver(&l4f00242t03_driver);
250}
251
252module_init(l4f00242t03_init);
253module_exit(l4f00242t03_exit);
254
255MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
256MODULE_DESCRIPTION("EPSON L4F00242T03 LCD");
257MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 00a9591b0003..7571bc26071e 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -167,6 +167,7 @@ static int locomolcd_resume(struct locomo_dev *dev)
167 167
168static int locomolcd_probe(struct locomo_dev *ldev) 168static int locomolcd_probe(struct locomo_dev *ldev)
169{ 169{
170 struct backlight_properties props;
170 unsigned long flags; 171 unsigned long flags;
171 172
172 local_irq_save(flags); 173 local_irq_save(flags);
@@ -182,13 +183,16 @@ static int locomolcd_probe(struct locomo_dev *ldev)
182 183
183 local_irq_restore(flags); 184 local_irq_restore(flags);
184 185
185 locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data); 186 memset(&props, 0, sizeof(struct backlight_properties));
187 props.max_brightness = 4;
188 locomolcd_bl_device = backlight_device_register("locomo-bl",
189 &ldev->dev, NULL,
190 &locomobl_data, &props);
186 191
187 if (IS_ERR (locomolcd_bl_device)) 192 if (IS_ERR (locomolcd_bl_device))
188 return PTR_ERR (locomolcd_bl_device); 193 return PTR_ERR (locomolcd_bl_device);
189 194
190 /* Set up frontlight so that screen is readable */ 195 /* Set up frontlight so that screen is readable */
191 locomolcd_bl_device->props.max_brightness = 4,
192 locomolcd_bl_device->props.brightness = 2; 196 locomolcd_bl_device->props.brightness = 2;
193 locomolcd_set_intensity(locomolcd_bl_device); 197 locomolcd_set_intensity(locomolcd_bl_device);
194 198
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index c267069a52a3..c91adaf492cf 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -104,6 +104,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
104 struct max8925_backlight_pdata *pdata = NULL; 104 struct max8925_backlight_pdata *pdata = NULL;
105 struct max8925_backlight_data *data; 105 struct max8925_backlight_data *data;
106 struct backlight_device *bl; 106 struct backlight_device *bl;
107 struct backlight_properties props;
107 struct resource *res; 108 struct resource *res;
108 char name[MAX8925_NAME_SIZE]; 109 char name[MAX8925_NAME_SIZE];
109 unsigned char value; 110 unsigned char value;
@@ -133,14 +134,15 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
133 data->chip = chip; 134 data->chip = chip;
134 data->current_brightness = 0; 135 data->current_brightness = 0;
135 136
137 memset(&props, 0, sizeof(struct backlight_properties));
138 props.max_brightness = MAX_BRIGHTNESS;
136 bl = backlight_device_register(name, &pdev->dev, data, 139 bl = backlight_device_register(name, &pdev->dev, data,
137 &max8925_backlight_ops); 140 &max8925_backlight_ops, &props);
138 if (IS_ERR(bl)) { 141 if (IS_ERR(bl)) {
139 dev_err(&pdev->dev, "failed to register backlight\n"); 142 dev_err(&pdev->dev, "failed to register backlight\n");
140 kfree(data); 143 kfree(data);
141 return PTR_ERR(bl); 144 return PTR_ERR(bl);
142 } 145 }
143 bl->props.max_brightness = MAX_BRIGHTNESS;
144 bl->props.brightness = MAX_BRIGHTNESS; 146 bl->props.brightness = MAX_BRIGHTNESS;
145 147
146 platform_set_drvdata(pdev, bl); 148 platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 2e78b0784bdc..1b5d3fe6bbbc 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -139,6 +139,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id)
139static const struct dmi_system_id __initdata mbp_device_table[] = { 139static const struct dmi_system_id __initdata mbp_device_table[] = {
140 { 140 {
141 .callback = mbp_dmi_match, 141 .callback = mbp_dmi_match,
142 .ident = "MacBook 1,1",
143 .matches = {
144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
145 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
146 },
147 .driver_data = (void *)&intel_chipset_data,
148 },
149 {
150 .callback = mbp_dmi_match,
151 .ident = "MacBook 2,1",
152 .matches = {
153 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
154 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
155 },
156 .driver_data = (void *)&intel_chipset_data,
157 },
158 {
159 .callback = mbp_dmi_match,
160 .ident = "MacBook 3,1",
161 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
163 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
164 },
165 .driver_data = (void *)&intel_chipset_data,
166 },
167 {
168 .callback = mbp_dmi_match,
169 .ident = "MacBook 4,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
178 .ident = "MacBook 4,2",
179 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
181 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
182 },
183 .driver_data = (void *)&intel_chipset_data,
184 },
185 {
186 .callback = mbp_dmi_match,
142 .ident = "MacBookPro 3,1", 187 .ident = "MacBookPro 3,1",
143 .matches = { 188 .matches = {
144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -250,6 +295,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
250 295
251static int __init mbp_init(void) 296static int __init mbp_init(void)
252{ 297{
298 struct backlight_properties props;
253 if (!dmi_check_system(mbp_device_table)) 299 if (!dmi_check_system(mbp_device_table))
254 return -ENODEV; 300 return -ENODEV;
255 301
@@ -257,14 +303,17 @@ static int __init mbp_init(void)
257 "Macbook Pro backlight")) 303 "Macbook Pro backlight"))
258 return -ENXIO; 304 return -ENXIO;
259 305
260 mbp_backlight_device = backlight_device_register("mbp_backlight", 306 memset(&props, 0, sizeof(struct backlight_properties));
261 NULL, NULL, &driver_data->backlight_ops); 307 props.max_brightness = 15;
308 mbp_backlight_device = backlight_device_register("mbp_backlight", NULL,
309 NULL,
310 &driver_data->backlight_ops,
311 &props);
262 if (IS_ERR(mbp_backlight_device)) { 312 if (IS_ERR(mbp_backlight_device)) {
263 release_region(driver_data->iostart, driver_data->iolen); 313 release_region(driver_data->iostart, driver_data->iolen);
264 return PTR_ERR(mbp_backlight_device); 314 return PTR_ERR(mbp_backlight_device);
265 } 315 }
266 316
267 mbp_backlight_device->props.max_brightness = 15;
268 mbp_backlight_device->props.brightness = 317 mbp_backlight_device->props.brightness =
269 driver_data->backlight_ops.get_brightness(mbp_backlight_device); 318 driver_data->backlight_ops.get_brightness(mbp_backlight_device);
270 backlight_update_status(mbp_backlight_device); 319 backlight_update_status(mbp_backlight_device);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index a3a7f8938175..333d28e6b062 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -132,6 +132,7 @@ static const struct backlight_ops omapbl_ops = {
132 132
133static int omapbl_probe(struct platform_device *pdev) 133static int omapbl_probe(struct platform_device *pdev)
134{ 134{
135 struct backlight_properties props;
135 struct backlight_device *dev; 136 struct backlight_device *dev;
136 struct omap_backlight *bl; 137 struct omap_backlight *bl;
137 struct omap_backlight_config *pdata = pdev->dev.platform_data; 138 struct omap_backlight_config *pdata = pdev->dev.platform_data;
@@ -143,7 +144,10 @@ static int omapbl_probe(struct platform_device *pdev)
143 if (unlikely(!bl)) 144 if (unlikely(!bl))
144 return -ENOMEM; 145 return -ENOMEM;
145 146
146 dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops); 147 memset(&props, 0, sizeof(struct backlight_properties));
148 props.max_brightness = OMAPBL_MAX_INTENSITY;
149 dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
150 &props);
147 if (IS_ERR(dev)) { 151 if (IS_ERR(dev)) {
148 kfree(bl); 152 kfree(bl);
149 return PTR_ERR(dev); 153 return PTR_ERR(dev);
@@ -160,7 +164,6 @@ static int omapbl_probe(struct platform_device *pdev)
160 omap_cfg_reg(PWL); /* Conflicts with UART3 */ 164 omap_cfg_reg(PWL); /* Conflicts with UART3 */
161 165
162 dev->props.fb_blank = FB_BLANK_UNBLANK; 166 dev->props.fb_blank = FB_BLANK_UNBLANK;
163 dev->props.max_brightness = OMAPBL_MAX_INTENSITY;
164 dev->props.brightness = pdata->default_intensity; 167 dev->props.brightness = pdata->default_intensity;
165 omapbl_update_status(dev); 168 omapbl_update_status(dev);
166 169
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 075786e05034..809278c90738 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -61,8 +61,10 @@ static const struct backlight_ops progearbl_ops = {
61 61
62static int progearbl_probe(struct platform_device *pdev) 62static int progearbl_probe(struct platform_device *pdev)
63{ 63{
64 struct backlight_properties props;
64 u8 temp; 65 u8 temp;
65 struct backlight_device *progear_backlight_device; 66 struct backlight_device *progear_backlight_device;
67 int ret;
66 68
67 pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); 69 pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
68 if (!pmu_dev) { 70 if (!pmu_dev) {
@@ -73,28 +75,37 @@ static int progearbl_probe(struct platform_device *pdev)
73 sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 75 sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
74 if (!sb_dev) { 76 if (!sb_dev) {
75 printk("ALI 1533 SB not found.\n"); 77 printk("ALI 1533 SB not found.\n");
76 pci_dev_put(pmu_dev); 78 ret = -ENODEV;
77 return -ENODEV; 79 goto put_pmu;
78 } 80 }
79 81
80 /* Set SB_MPS1 to enable brightness control. */ 82 /* Set SB_MPS1 to enable brightness control. */
81 pci_read_config_byte(sb_dev, SB_MPS1, &temp); 83 pci_read_config_byte(sb_dev, SB_MPS1, &temp);
82 pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20); 84 pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20);
83 85
86 memset(&props, 0, sizeof(struct backlight_properties));
87 props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
84 progear_backlight_device = backlight_device_register("progear-bl", 88 progear_backlight_device = backlight_device_register("progear-bl",
85 &pdev->dev, NULL, 89 &pdev->dev, NULL,
86 &progearbl_ops); 90 &progearbl_ops,
87 if (IS_ERR(progear_backlight_device)) 91 &props);
88 return PTR_ERR(progear_backlight_device); 92 if (IS_ERR(progear_backlight_device)) {
93 ret = PTR_ERR(progear_backlight_device);
94 goto put_sb;
95 }
89 96
90 platform_set_drvdata(pdev, progear_backlight_device); 97 platform_set_drvdata(pdev, progear_backlight_device);
91 98
92 progear_backlight_device->props.power = FB_BLANK_UNBLANK; 99 progear_backlight_device->props.power = FB_BLANK_UNBLANK;
93 progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; 100 progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
94 progear_backlight_device->props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
95 progearbl_set_intensity(progear_backlight_device); 101 progearbl_set_intensity(progear_backlight_device);
96 102
97 return 0; 103 return 0;
104put_sb:
105 pci_dev_put(sb_dev);
106put_pmu:
107 pci_dev_put(pmu_dev);
108 return ret;
98} 109}
99 110
100static int progearbl_remove(struct platform_device *pdev) 111static int progearbl_remove(struct platform_device *pdev)
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 9d2ec2a1cce8..b89eebc3f77d 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -65,6 +65,7 @@ static const struct backlight_ops pwm_backlight_ops = {
65 65
66static int pwm_backlight_probe(struct platform_device *pdev) 66static int pwm_backlight_probe(struct platform_device *pdev)
67{ 67{
68 struct backlight_properties props;
68 struct platform_pwm_backlight_data *data = pdev->dev.platform_data; 69 struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
69 struct backlight_device *bl; 70 struct backlight_device *bl;
70 struct pwm_bl_data *pb; 71 struct pwm_bl_data *pb;
@@ -100,15 +101,16 @@ static int pwm_backlight_probe(struct platform_device *pdev)
100 } else 101 } else
101 dev_dbg(&pdev->dev, "got pwm for backlight\n"); 102 dev_dbg(&pdev->dev, "got pwm for backlight\n");
102 103
103 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, 104 memset(&props, 0, sizeof(struct backlight_properties));
104 pb, &pwm_backlight_ops); 105 props.max_brightness = data->max_brightness;
106 bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
107 &pwm_backlight_ops, &props);
105 if (IS_ERR(bl)) { 108 if (IS_ERR(bl)) {
106 dev_err(&pdev->dev, "failed to register backlight\n"); 109 dev_err(&pdev->dev, "failed to register backlight\n");
107 ret = PTR_ERR(bl); 110 ret = PTR_ERR(bl);
108 goto err_bl; 111 goto err_bl;
109 } 112 }
110 113
111 bl->props.max_brightness = data->max_brightness;
112 bl->props.brightness = data->dft_brightness; 114 bl->props.brightness = data->dft_brightness;
113 backlight_update_status(bl); 115 backlight_update_status(bl);
114 116
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index e14ce4d469f5..f57bbf170049 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -80,6 +80,7 @@ static const struct backlight_ops bl_ops = {
80static int __devinit tosa_bl_probe(struct i2c_client *client, 80static int __devinit tosa_bl_probe(struct i2c_client *client,
81 const struct i2c_device_id *id) 81 const struct i2c_device_id *id)
82{ 82{
83 struct backlight_properties props;
83 struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL); 84 struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
84 int ret = 0; 85 int ret = 0;
85 if (!data) 86 if (!data)
@@ -99,15 +100,16 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
99 i2c_set_clientdata(client, data); 100 i2c_set_clientdata(client, data);
100 data->i2c = client; 101 data->i2c = client;
101 102
102 data->bl = backlight_device_register("tosa-bl", &client->dev, 103 memset(&props, 0, sizeof(struct backlight_properties));
103 data, &bl_ops); 104 props.max_brightness = 512 - 1;
105 data->bl = backlight_device_register("tosa-bl", &client->dev, data,
106 &bl_ops, &props);
104 if (IS_ERR(data->bl)) { 107 if (IS_ERR(data->bl)) {
105 ret = PTR_ERR(data->bl); 108 ret = PTR_ERR(data->bl);
106 goto err_reg; 109 goto err_reg;
107 } 110 }
108 111
109 data->bl->props.brightness = 69; 112 data->bl->props.brightness = 69;
110 data->bl->props.max_brightness = 512 - 1;
111 data->bl->props.power = FB_BLANK_UNBLANK; 113 data->bl->props.power = FB_BLANK_UNBLANK;
112 114
113 backlight_update_status(data->bl); 115 backlight_update_status(data->bl);
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index e32add37a203..a4312709fb1b 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -125,6 +125,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
125 struct wm831x_backlight_pdata *pdata; 125 struct wm831x_backlight_pdata *pdata;
126 struct wm831x_backlight_data *data; 126 struct wm831x_backlight_data *data;
127 struct backlight_device *bl; 127 struct backlight_device *bl;
128 struct backlight_properties props;
128 int ret, i, max_isel, isink_reg, dcdc_cfg; 129 int ret, i, max_isel, isink_reg, dcdc_cfg;
129 130
130 /* We need platform data */ 131 /* We need platform data */
@@ -191,15 +192,15 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
191 data->current_brightness = 0; 192 data->current_brightness = 0;
192 data->isink_reg = isink_reg; 193 data->isink_reg = isink_reg;
193 194
194 bl = backlight_device_register("wm831x", &pdev->dev, 195 props.max_brightness = max_isel;
195 data, &wm831x_backlight_ops); 196 bl = backlight_device_register("wm831x", &pdev->dev, data,
197 &wm831x_backlight_ops, &props);
196 if (IS_ERR(bl)) { 198 if (IS_ERR(bl)) {
197 dev_err(&pdev->dev, "failed to register backlight\n"); 199 dev_err(&pdev->dev, "failed to register backlight\n");
198 kfree(data); 200 kfree(data);
199 return PTR_ERR(bl); 201 return PTR_ERR(bl);
200 } 202 }
201 203
202 bl->props.max_brightness = max_isel;
203 bl->props.brightness = max_isel; 204 bl->props.brightness = max_isel;
204 205
205 platform_set_drvdata(pdev, bl); 206 platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 814312a7452f..23b2a8c0dbfc 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -433,7 +433,7 @@ static int bl_get_brightness(struct backlight_device *bd)
433 return 0; 433 return 0;
434} 434}
435 435
436static struct backlight_ops bfin_lq043fb_bl_ops = { 436static const struct backlight_ops bfin_lq043fb_bl_ops = {
437 .get_brightness = bl_get_brightness, 437 .get_brightness = bl_get_brightness,
438}; 438};
439 439
@@ -501,6 +501,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
501 501
502static int __devinit bfin_bf54x_probe(struct platform_device *pdev) 502static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
503{ 503{
504 struct backlight_properties props;
504 struct bfin_bf54xfb_info *info; 505 struct bfin_bf54xfb_info *info;
505 struct fb_info *fbinfo; 506 struct fb_info *fbinfo;
506 int ret; 507 int ret;
@@ -645,10 +646,16 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
645 goto out8; 646 goto out8;
646 } 647 }
647#ifndef NO_BL_SUPPORT 648#ifndef NO_BL_SUPPORT
648 bl_dev = 649 memset(&props, 0, sizeof(struct backlight_properties));
649 backlight_device_register("bf54x-bl", NULL, NULL, 650 props.max_brightness = 255;
650 &bfin_lq043fb_bl_ops); 651 bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
651 bl_dev->props.max_brightness = 255; 652 &bfin_lq043fb_bl_ops, &props);
653 if (IS_ERR(bl_dev)) {
654 printk(KERN_ERR DRIVER_NAME
655 ": unable to register backlight.\n");
656 ret = -EINVAL;
657 goto out9;
658 }
652 659
653 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); 660 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
654 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); 661 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n");
@@ -656,6 +663,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
656 663
657 return 0; 664 return 0;
658 665
666out9:
667 unregister_framebuffer(fbinfo);
659out8: 668out8:
660 free_irq(info->irq, info); 669 free_irq(info->irq, info);
661out7: 670out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 5653d083a983..31a2dec927bb 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -352,7 +352,7 @@ static int bl_get_brightness(struct backlight_device *bd)
352 return 0; 352 return 0;
353} 353}
354 354
355static struct backlight_ops bfin_lq043fb_bl_ops = { 355static const struct backlight_ops bfin_lq043fb_bl_ops = {
356 .get_brightness = bl_get_brightness, 356 .get_brightness = bl_get_brightness,
357}; 357};
358 358
@@ -419,6 +419,7 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
419 419
420static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) 420static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
421{ 421{
422 struct backlight_properties props;
422 struct bfin_t350mcqbfb_info *info; 423 struct bfin_t350mcqbfb_info *info;
423 struct fb_info *fbinfo; 424 struct fb_info *fbinfo;
424 int ret; 425 int ret;
@@ -540,10 +541,16 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
540 goto out8; 541 goto out8;
541 } 542 }
542#ifndef NO_BL_SUPPORT 543#ifndef NO_BL_SUPPORT
543 bl_dev = 544 memset(&props, 0, sizeof(struct backlight_properties));
544 backlight_device_register("bf52x-bl", NULL, NULL, 545 props.max_brightness = 255;
545 &bfin_lq043fb_bl_ops); 546 bl_dev = backlight_device_register("bf52x-bl", NULL, NULL,
546 bl_dev->props.max_brightness = 255; 547 &bfin_lq043fb_bl_ops, &props);
548 if (IS_ERR(bl_dev)) {
549 printk(KERN_ERR DRIVER_NAME
550 ": unable to register backlight.\n");
551 ret = -EINVAL;
552 goto out9;
553 }
547 554
548 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 555 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
549 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); 556 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n");
@@ -551,6 +558,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
551 558
552 return 0; 559 return 0;
553 560
561out9:
562 unregister_framebuffer(fbinfo);
554out8: 563out8:
555 free_irq(info->irq, info); 564 free_irq(info->irq, info);
556out7: 565out7:
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 443e3c85a9a0..2fb552a6f32c 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -94,6 +94,7 @@ static struct backlight_ops nvidia_bl_ops = {
94 94
95void nvidia_bl_init(struct nvidia_par *par) 95void nvidia_bl_init(struct nvidia_par *par)
96{ 96{
97 struct backlight_properties props;
97 struct fb_info *info = pci_get_drvdata(par->pci_dev); 98 struct fb_info *info = pci_get_drvdata(par->pci_dev);
98 struct backlight_device *bd; 99 struct backlight_device *bd;
99 char name[12]; 100 char name[12];
@@ -109,7 +110,10 @@ void nvidia_bl_init(struct nvidia_par *par)
109 110
110 snprintf(name, sizeof(name), "nvidiabl%d", info->node); 111 snprintf(name, sizeof(name), "nvidiabl%d", info->node);
111 112
112 bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops); 113 memset(&props, 0, sizeof(struct backlight_properties));
114 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
115 bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
116 &props);
113 if (IS_ERR(bd)) { 117 if (IS_ERR(bd)) {
114 info->bl_dev = NULL; 118 info->bl_dev = NULL;
115 printk(KERN_WARNING "nvidia: Backlight registration failed\n"); 119 printk(KERN_WARNING "nvidia: Backlight registration failed\n");
@@ -121,7 +125,6 @@ void nvidia_bl_init(struct nvidia_par *par)
121 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL, 125 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL,
122 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); 126 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
123 127
124 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
125 bd->props.brightness = bd->props.max_brightness; 128 bd->props.brightness = bd->props.max_brightness;
126 bd->props.power = FB_BLANK_UNBLANK; 129 bd->props.power = FB_BLANK_UNBLANK;
127 backlight_update_status(bd); 130 backlight_update_status(bd);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fcd6a61a91eb..59769e85d41c 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -486,6 +486,7 @@ static struct attribute_group taal_attr_group = {
486 486
487static int taal_probe(struct omap_dss_device *dssdev) 487static int taal_probe(struct omap_dss_device *dssdev)
488{ 488{
489 struct backlight_properties props;
489 struct taal_data *td; 490 struct taal_data *td;
490 struct backlight_device *bldev; 491 struct backlight_device *bldev;
491 int r; 492 int r;
@@ -520,11 +521,16 @@ static int taal_probe(struct omap_dss_device *dssdev)
520 521
521 /* if no platform set_backlight() defined, presume DSI backlight 522 /* if no platform set_backlight() defined, presume DSI backlight
522 * control */ 523 * control */
524 memset(&props, 0, sizeof(struct backlight_properties));
523 if (!dssdev->set_backlight) 525 if (!dssdev->set_backlight)
524 td->use_dsi_bl = true; 526 td->use_dsi_bl = true;
525 527
528 if (td->use_dsi_bl)
529 props.max_brightness = 255;
530 else
531 props.max_brightness = 127;
526 bldev = backlight_device_register("taal", &dssdev->dev, dssdev, 532 bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
527 &taal_bl_ops); 533 &taal_bl_ops, &props);
528 if (IS_ERR(bldev)) { 534 if (IS_ERR(bldev)) {
529 r = PTR_ERR(bldev); 535 r = PTR_ERR(bldev);
530 goto err2; 536 goto err2;
@@ -534,13 +540,10 @@ static int taal_probe(struct omap_dss_device *dssdev)
534 540
535 bldev->props.fb_blank = FB_BLANK_UNBLANK; 541 bldev->props.fb_blank = FB_BLANK_UNBLANK;
536 bldev->props.power = FB_BLANK_UNBLANK; 542 bldev->props.power = FB_BLANK_UNBLANK;
537 if (td->use_dsi_bl) { 543 if (td->use_dsi_bl)
538 bldev->props.max_brightness = 255;
539 bldev->props.brightness = 255; 544 bldev->props.brightness = 255;
540 } else { 545 else
541 bldev->props.max_brightness = 127;
542 bldev->props.brightness = 127; 546 bldev->props.brightness = 127;
543 }
544 547
545 taal_bl_update_status(bldev); 548 taal_bl_update_status(bldev);
546 549
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d94c57ffbdb1..618f36bec10d 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -338,6 +338,7 @@ static struct backlight_ops riva_bl_ops = {
338 338
339static void riva_bl_init(struct riva_par *par) 339static void riva_bl_init(struct riva_par *par)
340{ 340{
341 struct backlight_properties props;
341 struct fb_info *info = pci_get_drvdata(par->pdev); 342 struct fb_info *info = pci_get_drvdata(par->pdev);
342 struct backlight_device *bd; 343 struct backlight_device *bd;
343 char name[12]; 344 char name[12];
@@ -353,7 +354,10 @@ static void riva_bl_init(struct riva_par *par)
353 354
354 snprintf(name, sizeof(name), "rivabl%d", info->node); 355 snprintf(name, sizeof(name), "rivabl%d", info->node);
355 356
356 bd = backlight_device_register(name, info->dev, par, &riva_bl_ops); 357 memset(&props, 0, sizeof(struct backlight_properties));
358 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
359 bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
360 &props);
357 if (IS_ERR(bd)) { 361 if (IS_ERR(bd)) {
358 info->bl_dev = NULL; 362 info->bl_dev = NULL;
359 printk(KERN_WARNING "riva: Backlight registration failed\n"); 363 printk(KERN_WARNING "riva: Backlight registration failed\n");
@@ -365,7 +369,6 @@ static void riva_bl_init(struct riva_par *par)
365 MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL, 369 MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL,
366 FB_BACKLIGHT_MAX); 370 FB_BACKLIGHT_MAX);
367 371
368 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
369 bd->props.brightness = bd->props.max_brightness; 372 bd->props.brightness = bd->props.max_brightness;
370 bd->props.power = FB_BLANK_UNBLANK; 373 bd->props.power = FB_BLANK_UNBLANK;
371 backlight_update_status(bd); 374 backlight_update_status(bd);
diff --git a/fs/Kconfig b/fs/Kconfig
index 7405f071be67..5f85b5947613 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -235,6 +235,7 @@ config NFS_COMMON
235 235
236source "net/sunrpc/Kconfig" 236source "net/sunrpc/Kconfig"
237source "fs/smbfs/Kconfig" 237source "fs/smbfs/Kconfig"
238source "fs/ceph/Kconfig"
238source "fs/cifs/Kconfig" 239source "fs/cifs/Kconfig"
239source "fs/ncpfs/Kconfig" 240source "fs/ncpfs/Kconfig"
240source "fs/coda/Kconfig" 241source "fs/coda/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index c3633aa46911..97f340f14ba2 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -125,3 +125,4 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/
125obj-$(CONFIG_BTRFS_FS) += btrfs/ 125obj-$(CONFIG_BTRFS_FS) += btrfs/
126obj-$(CONFIG_GFS2_FS) += gfs2/ 126obj-$(CONFIG_GFS2_FS) += gfs2/
127obj-$(CONFIG_EXOFS_FS) += exofs/ 127obj-$(CONFIG_EXOFS_FS) += exofs/
128obj-$(CONFIG_CEPH_FS) += ceph/
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 3ef504370034..bb4ed144d0e4 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -189,8 +189,9 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order)
189 if (!permits) 189 if (!permits)
190 goto out_unlock; 190 goto out_unlock;
191 191
192 memcpy(permits->permits, xpermits->permits, 192 if (xpermits)
193 count * sizeof(struct afs_permit)); 193 memcpy(permits->permits, xpermits->permits,
194 count * sizeof(struct afs_permit));
194 195
195 _debug("key %x access %x", 196 _debug("key %x access %x",
196 key_serial(key), vnode->status.caller_access); 197 key_serial(key), vnode->status.caller_access);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3f1f50d9d916..7a4dee199832 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -153,6 +153,11 @@ struct btrfs_inode {
153 unsigned ordered_data_close:1; 153 unsigned ordered_data_close:1;
154 unsigned dummy_inode:1; 154 unsigned dummy_inode:1;
155 155
156 /*
157 * always compress this one file
158 */
159 unsigned force_compress:1;
160
156 struct inode vfs_inode; 161 struct inode vfs_inode;
157}; 162};
158 163
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index a11a32058b50..28b92a7218ab 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -478,7 +478,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
478 goto next; 478 goto next;
479 } 479 }
480 480
481 page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS); 481 page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
482 if (!page) 482 if (!page)
483 break; 483 break;
484 484
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8b5cfdd4bfc1..0af2e3868573 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -373,11 +373,13 @@ struct btrfs_super_block {
373 * ones specified below then we will fail to mount 373 * ones specified below then we will fail to mount
374 */ 374 */
375#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) 375#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
376#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (2ULL << 0)
376 377
377#define BTRFS_FEATURE_COMPAT_SUPP 0ULL 378#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
378#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL 379#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
379#define BTRFS_FEATURE_INCOMPAT_SUPP \ 380#define BTRFS_FEATURE_INCOMPAT_SUPP \
380 BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF 381 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
382 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)
381 383
382/* 384/*
383 * A leaf is full of items. offset and size tell us where to find 385 * A leaf is full of items. offset and size tell us where to find
@@ -1182,7 +1184,6 @@ struct btrfs_root {
1182#define BTRFS_INODE_NOATIME (1 << 9) 1184#define BTRFS_INODE_NOATIME (1 << 9)
1183#define BTRFS_INODE_DIRSYNC (1 << 10) 1185#define BTRFS_INODE_DIRSYNC (1 << 10)
1184 1186
1185
1186/* some macros to generate set/get funcs for the struct fields. This 1187/* some macros to generate set/get funcs for the struct fields. This
1187 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 1188 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
1188 * one for u8: 1189 * one for u8:
@@ -1842,7 +1843,7 @@ BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
1842BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 1843BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
1843 compat_flags, 64); 1844 compat_flags, 64);
1844BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 1845BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
1845 compat_flags, 64); 1846 compat_ro_flags, 64);
1846BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 1847BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
1847 incompat_flags, 64); 1848 incompat_flags, 64);
1848BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 1849BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
@@ -2310,7 +2311,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2310 u32 min_type); 2311 u32 min_type);
2311 2312
2312int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 2313int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
2313int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end); 2314int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2315 struct extent_state **cached_state);
2314int btrfs_writepages(struct address_space *mapping, 2316int btrfs_writepages(struct address_space *mapping,
2315 struct writeback_control *wbc); 2317 struct writeback_control *wbc);
2316int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 2318int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
@@ -2335,7 +2337,7 @@ int btrfs_init_cachep(void);
2335void btrfs_destroy_cachep(void); 2337void btrfs_destroy_cachep(void);
2336long btrfs_ioctl_trans_end(struct file *file); 2338long btrfs_ioctl_trans_end(struct file *file);
2337struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 2339struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2338 struct btrfs_root *root); 2340 struct btrfs_root *root, int *was_new);
2339int btrfs_commit_write(struct file *file, struct page *page, 2341int btrfs_commit_write(struct file *file, struct page *page,
2340 unsigned from, unsigned to); 2342 unsigned from, unsigned to);
2341struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 2343struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
@@ -2386,7 +2388,6 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
2386ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); 2388ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
2387 2389
2388/* super.c */ 2390/* super.c */
2389u64 btrfs_parse_size(char *str);
2390int btrfs_parse_options(struct btrfs_root *root, char *options); 2391int btrfs_parse_options(struct btrfs_root *root, char *options);
2391int btrfs_sync_fs(struct super_block *sb, int wait); 2392int btrfs_sync_fs(struct super_block *sb, int wait);
2392 2393
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0427183e3e05..11d0ad30e203 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -263,13 +263,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
263static int verify_parent_transid(struct extent_io_tree *io_tree, 263static int verify_parent_transid(struct extent_io_tree *io_tree,
264 struct extent_buffer *eb, u64 parent_transid) 264 struct extent_buffer *eb, u64 parent_transid)
265{ 265{
266 struct extent_state *cached_state = NULL;
266 int ret; 267 int ret;
267 268
268 if (!parent_transid || btrfs_header_generation(eb) == parent_transid) 269 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
269 return 0; 270 return 0;
270 271
271 lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); 272 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
272 if (extent_buffer_uptodate(io_tree, eb) && 273 0, &cached_state, GFP_NOFS);
274 if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
273 btrfs_header_generation(eb) == parent_transid) { 275 btrfs_header_generation(eb) == parent_transid) {
274 ret = 0; 276 ret = 0;
275 goto out; 277 goto out;
@@ -282,10 +284,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
282 (unsigned long long)btrfs_header_generation(eb)); 284 (unsigned long long)btrfs_header_generation(eb));
283 } 285 }
284 ret = 1; 286 ret = 1;
285 clear_extent_buffer_uptodate(io_tree, eb); 287 clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
286out: 288out:
287 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, 289 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
288 GFP_NOFS); 290 &cached_state, GFP_NOFS);
289 return ret; 291 return ret;
290} 292}
291 293
@@ -2497,7 +2499,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2497 int ret; 2499 int ret;
2498 struct inode *btree_inode = buf->first_page->mapping->host; 2500 struct inode *btree_inode = buf->first_page->mapping->host;
2499 2501
2500 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); 2502 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2503 NULL);
2501 if (!ret) 2504 if (!ret)
2502 return ret; 2505 return ret;
2503 2506
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index ba5c3fd5ab8c..951ef09b82f4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -95,7 +95,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
95 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 95 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
96 key.offset = 0; 96 key.offset = 0;
97 97
98 inode = btrfs_iget(sb, &key, root); 98 inode = btrfs_iget(sb, &key, root, NULL);
99 if (IS_ERR(inode)) { 99 if (IS_ERR(inode)) {
100 err = PTR_ERR(inode); 100 err = PTR_ERR(inode);
101 goto fail; 101 goto fail;
@@ -223,7 +223,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
223 223
224 key.type = BTRFS_INODE_ITEM_KEY; 224 key.type = BTRFS_INODE_ITEM_KEY;
225 key.offset = 0; 225 key.offset = 0;
226 dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root)); 226 dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
227 if (!IS_ERR(dentry)) 227 if (!IS_ERR(dentry))
228 dentry->d_op = &btrfs_dentry_operations; 228 dentry->d_op = &btrfs_dentry_operations;
229 return dentry; 229 return dentry;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 559f72489b3b..1727b26fb194 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6561,6 +6561,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
6561 struct btrfs_key key; 6561 struct btrfs_key key;
6562 struct inode *inode = NULL; 6562 struct inode *inode = NULL;
6563 struct btrfs_file_extent_item *fi; 6563 struct btrfs_file_extent_item *fi;
6564 struct extent_state *cached_state = NULL;
6564 u64 num_bytes; 6565 u64 num_bytes;
6565 u64 skip_objectid = 0; 6566 u64 skip_objectid = 0;
6566 u32 nritems; 6567 u32 nritems;
@@ -6589,12 +6590,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
6589 } 6590 }
6590 num_bytes = btrfs_file_extent_num_bytes(leaf, fi); 6591 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6591 6592
6592 lock_extent(&BTRFS_I(inode)->io_tree, key.offset, 6593 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
6593 key.offset + num_bytes - 1, GFP_NOFS); 6594 key.offset + num_bytes - 1, 0, &cached_state,
6595 GFP_NOFS);
6594 btrfs_drop_extent_cache(inode, key.offset, 6596 btrfs_drop_extent_cache(inode, key.offset,
6595 key.offset + num_bytes - 1, 1); 6597 key.offset + num_bytes - 1, 1);
6596 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, 6598 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
6597 key.offset + num_bytes - 1, GFP_NOFS); 6599 key.offset + num_bytes - 1, &cached_state,
6600 GFP_NOFS);
6598 cond_resched(); 6601 cond_resched();
6599 } 6602 }
6600 iput(inode); 6603 iput(inode);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7073cbb1b2d4..c99121ac5d6b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
513 u64 last_end; 513 u64 last_end;
514 int err; 514 int err;
515 int set = 0; 515 int set = 0;
516 int clear = 0;
516 517
518 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
519 clear = 1;
517again: 520again:
518 if (!prealloc && (mask & __GFP_WAIT)) { 521 if (!prealloc && (mask & __GFP_WAIT)) {
519 prealloc = alloc_extent_state(mask); 522 prealloc = alloc_extent_state(mask);
@@ -524,14 +527,20 @@ again:
524 spin_lock(&tree->lock); 527 spin_lock(&tree->lock);
525 if (cached_state) { 528 if (cached_state) {
526 cached = *cached_state; 529 cached = *cached_state;
527 *cached_state = NULL; 530
528 cached_state = NULL; 531 if (clear) {
532 *cached_state = NULL;
533 cached_state = NULL;
534 }
535
529 if (cached && cached->tree && cached->start == start) { 536 if (cached && cached->tree && cached->start == start) {
530 atomic_dec(&cached->refs); 537 if (clear)
538 atomic_dec(&cached->refs);
531 state = cached; 539 state = cached;
532 goto hit_next; 540 goto hit_next;
533 } 541 }
534 free_extent_state(cached); 542 if (clear)
543 free_extent_state(cached);
535 } 544 }
536 /* 545 /*
537 * this search will find the extents that end after 546 * this search will find the extents that end after
@@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
946} 955}
947 956
948int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 957int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
949 gfp_t mask) 958 struct extent_state **cached_state, gfp_t mask)
950{ 959{
951 return set_extent_bit(tree, start, end, 960 return set_extent_bit(tree, start, end,
952 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE, 961 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
953 0, NULL, NULL, mask); 962 0, NULL, cached_state, mask);
954} 963}
955 964
956int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 965int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
984} 993}
985 994
986static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 995static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
987 u64 end, gfp_t mask) 996 u64 end, struct extent_state **cached_state,
997 gfp_t mask)
988{ 998{
989 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, 999 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
990 NULL, mask); 1000 cached_state, mask);
991} 1001}
992 1002
993int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) 1003int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1171,7 +1181,8 @@ out:
1171 * 1 is returned if we find something, 0 if nothing was in the tree 1181 * 1 is returned if we find something, 0 if nothing was in the tree
1172 */ 1182 */
1173static noinline u64 find_delalloc_range(struct extent_io_tree *tree, 1183static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1174 u64 *start, u64 *end, u64 max_bytes) 1184 u64 *start, u64 *end, u64 max_bytes,
1185 struct extent_state **cached_state)
1175{ 1186{
1176 struct rb_node *node; 1187 struct rb_node *node;
1177 struct extent_state *state; 1188 struct extent_state *state;
@@ -1203,8 +1214,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1203 *end = state->end; 1214 *end = state->end;
1204 goto out; 1215 goto out;
1205 } 1216 }
1206 if (!found) 1217 if (!found) {
1207 *start = state->start; 1218 *start = state->start;
1219 *cached_state = state;
1220 atomic_inc(&state->refs);
1221 }
1208 found++; 1222 found++;
1209 *end = state->end; 1223 *end = state->end;
1210 cur_start = state->end + 1; 1224 cur_start = state->end + 1;
@@ -1336,10 +1350,11 @@ again:
1336 delalloc_start = *start; 1350 delalloc_start = *start;
1337 delalloc_end = 0; 1351 delalloc_end = 0;
1338 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, 1352 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1339 max_bytes); 1353 max_bytes, &cached_state);
1340 if (!found || delalloc_end <= *start) { 1354 if (!found || delalloc_end <= *start) {
1341 *start = delalloc_start; 1355 *start = delalloc_start;
1342 *end = delalloc_end; 1356 *end = delalloc_end;
1357 free_extent_state(cached_state);
1343 return found; 1358 return found;
1344 } 1359 }
1345 1360
@@ -1722,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
1722 } 1737 }
1723 1738
1724 if (!uptodate) { 1739 if (!uptodate) {
1725 clear_extent_uptodate(tree, start, end, GFP_NOFS); 1740 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1726 ClearPageUptodate(page); 1741 ClearPageUptodate(page);
1727 SetPageError(page); 1742 SetPageError(page);
1728 } 1743 }
@@ -1750,7 +1765,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
1750static void end_bio_extent_readpage(struct bio *bio, int err) 1765static void end_bio_extent_readpage(struct bio *bio, int err)
1751{ 1766{
1752 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1767 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1753 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1768 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1769 struct bio_vec *bvec = bio->bi_io_vec;
1754 struct extent_io_tree *tree; 1770 struct extent_io_tree *tree;
1755 u64 start; 1771 u64 start;
1756 u64 end; 1772 u64 end;
@@ -1773,7 +1789,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1773 else 1789 else
1774 whole_page = 0; 1790 whole_page = 0;
1775 1791
1776 if (--bvec >= bio->bi_io_vec) 1792 if (++bvec <= bvec_end)
1777 prefetchw(&bvec->bv_page->flags); 1793 prefetchw(&bvec->bv_page->flags);
1778 1794
1779 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 1795 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
@@ -1818,7 +1834,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1818 } 1834 }
1819 check_page_locked(tree, page); 1835 check_page_locked(tree, page);
1820 } 1836 }
1821 } while (bvec >= bio->bi_io_vec); 1837 } while (bvec <= bvec_end);
1822 1838
1823 bio_put(bio); 1839 bio_put(bio);
1824} 1840}
@@ -2704,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree,
2704int extent_invalidatepage(struct extent_io_tree *tree, 2720int extent_invalidatepage(struct extent_io_tree *tree,
2705 struct page *page, unsigned long offset) 2721 struct page *page, unsigned long offset)
2706{ 2722{
2723 struct extent_state *cached_state = NULL;
2707 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); 2724 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2708 u64 end = start + PAGE_CACHE_SIZE - 1; 2725 u64 end = start + PAGE_CACHE_SIZE - 1;
2709 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 2726 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
@@ -2712,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree,
2712 if (start > end) 2729 if (start > end)
2713 return 0; 2730 return 0;
2714 2731
2715 lock_extent(tree, start, end, GFP_NOFS); 2732 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2716 wait_on_page_writeback(page); 2733 wait_on_page_writeback(page);
2717 clear_extent_bit(tree, start, end, 2734 clear_extent_bit(tree, start, end,
2718 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 2735 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2719 EXTENT_DO_ACCOUNTING, 2736 EXTENT_DO_ACCOUNTING,
2720 1, 1, NULL, GFP_NOFS); 2737 1, 1, &cached_state, GFP_NOFS);
2721 return 0; 2738 return 0;
2722} 2739}
2723 2740
@@ -2920,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2920 get_extent_t *get_extent) 2937 get_extent_t *get_extent)
2921{ 2938{
2922 struct inode *inode = mapping->host; 2939 struct inode *inode = mapping->host;
2940 struct extent_state *cached_state = NULL;
2923 u64 start = iblock << inode->i_blkbits; 2941 u64 start = iblock << inode->i_blkbits;
2924 sector_t sector = 0; 2942 sector_t sector = 0;
2925 size_t blksize = (1 << inode->i_blkbits); 2943 size_t blksize = (1 << inode->i_blkbits);
2926 struct extent_map *em; 2944 struct extent_map *em;
2927 2945
2928 lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, 2946 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2929 GFP_NOFS); 2947 0, &cached_state, GFP_NOFS);
2930 em = get_extent(inode, NULL, 0, start, blksize, 0); 2948 em = get_extent(inode, NULL, 0, start, blksize, 0);
2931 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, 2949 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
2932 GFP_NOFS); 2950 start + blksize - 1, &cached_state, GFP_NOFS);
2933 if (!em || IS_ERR(em)) 2951 if (!em || IS_ERR(em))
2934 return 0; 2952 return 0;
2935 2953
@@ -2951,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2951 u32 flags = 0; 2969 u32 flags = 0;
2952 u64 disko = 0; 2970 u64 disko = 0;
2953 struct extent_map *em = NULL; 2971 struct extent_map *em = NULL;
2972 struct extent_state *cached_state = NULL;
2954 int end = 0; 2973 int end = 0;
2955 u64 em_start = 0, em_len = 0; 2974 u64 em_start = 0, em_len = 0;
2956 unsigned long emflags; 2975 unsigned long emflags;
@@ -2959,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2959 if (len == 0) 2978 if (len == 0)
2960 return -EINVAL; 2979 return -EINVAL;
2961 2980
2962 lock_extent(&BTRFS_I(inode)->io_tree, start, start + len, 2981 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2963 GFP_NOFS); 2982 &cached_state, GFP_NOFS);
2964 em = get_extent(inode, NULL, 0, off, max - off, 0); 2983 em = get_extent(inode, NULL, 0, off, max - off, 0);
2965 if (!em) 2984 if (!em)
2966 goto out; 2985 goto out;
@@ -3023,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3023out_free: 3042out_free:
3024 free_extent_map(em); 3043 free_extent_map(em);
3025out: 3044out:
3026 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len, 3045 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3027 GFP_NOFS); 3046 &cached_state, GFP_NOFS);
3028 return ret; 3047 return ret;
3029} 3048}
3030 3049
@@ -3264,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
3264} 3283}
3265 3284
3266int clear_extent_buffer_uptodate(struct extent_io_tree *tree, 3285int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3267 struct extent_buffer *eb) 3286 struct extent_buffer *eb,
3287 struct extent_state **cached_state)
3268{ 3288{
3269 unsigned long i; 3289 unsigned long i;
3270 struct page *page; 3290 struct page *page;
@@ -3274,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3274 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 3294 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3275 3295
3276 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3296 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3277 GFP_NOFS); 3297 cached_state, GFP_NOFS);
3278 for (i = 0; i < num_pages; i++) { 3298 for (i = 0; i < num_pages; i++) {
3279 page = extent_buffer_page(eb, i); 3299 page = extent_buffer_page(eb, i);
3280 if (page) 3300 if (page)
@@ -3334,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
3334} 3354}
3335 3355
3336int extent_buffer_uptodate(struct extent_io_tree *tree, 3356int extent_buffer_uptodate(struct extent_io_tree *tree,
3337 struct extent_buffer *eb) 3357 struct extent_buffer *eb,
3358 struct extent_state *cached_state)
3338{ 3359{
3339 int ret = 0; 3360 int ret = 0;
3340 unsigned long num_pages; 3361 unsigned long num_pages;
@@ -3346,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
3346 return 1; 3367 return 1;
3347 3368
3348 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3369 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3349 EXTENT_UPTODATE, 1, NULL); 3370 EXTENT_UPTODATE, 1, cached_state);
3350 if (ret) 3371 if (ret)
3351 return ret; 3372 return ret;
3352 3373
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 36de250a7b2b..bbab4813646f 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -163,6 +163,8 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
163int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 163int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
164 int bits, struct extent_state **cached, gfp_t mask); 164 int bits, struct extent_state **cached, gfp_t mask);
165int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 165int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
166int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
167 struct extent_state **cached, gfp_t mask);
166int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 168int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
167 gfp_t mask); 169 gfp_t mask);
168int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 170int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
@@ -196,7 +198,7 @@ int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
196int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, 198int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
197 u64 end, gfp_t mask); 199 u64 end, gfp_t mask);
198int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, 200int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
199 gfp_t mask); 201 struct extent_state **cached_state, gfp_t mask);
200int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, 202int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
201 gfp_t mask); 203 gfp_t mask);
202int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 204int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
@@ -281,9 +283,11 @@ int test_extent_buffer_dirty(struct extent_io_tree *tree,
281int set_extent_buffer_uptodate(struct extent_io_tree *tree, 283int set_extent_buffer_uptodate(struct extent_io_tree *tree,
282 struct extent_buffer *eb); 284 struct extent_buffer *eb);
283int clear_extent_buffer_uptodate(struct extent_io_tree *tree, 285int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
284 struct extent_buffer *eb); 286 struct extent_buffer *eb,
287 struct extent_state **cached_state);
285int extent_buffer_uptodate(struct extent_io_tree *tree, 288int extent_buffer_uptodate(struct extent_io_tree *tree,
286 struct extent_buffer *eb); 289 struct extent_buffer *eb,
290 struct extent_state *cached_state);
287int map_extent_buffer(struct extent_buffer *eb, unsigned long offset, 291int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
288 unsigned long min_len, char **token, char **map, 292 unsigned long min_len, char **token, char **map,
289 unsigned long *map_start, 293 unsigned long *map_start,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 6ed434ac037f..ee3323c7fc1c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -123,7 +123,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
124 124
125 end_of_last_block = start_pos + num_bytes - 1; 125 end_of_last_block = start_pos + num_bytes - 1;
126 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); 126 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
127 NULL);
127 if (err) 128 if (err)
128 return err; 129 return err;
129 130
@@ -753,6 +754,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
753 loff_t pos, unsigned long first_index, 754 loff_t pos, unsigned long first_index,
754 unsigned long last_index, size_t write_bytes) 755 unsigned long last_index, size_t write_bytes)
755{ 756{
757 struct extent_state *cached_state = NULL;
756 int i; 758 int i;
757 unsigned long index = pos >> PAGE_CACHE_SHIFT; 759 unsigned long index = pos >> PAGE_CACHE_SHIFT;
758 struct inode *inode = fdentry(file)->d_inode; 760 struct inode *inode = fdentry(file)->d_inode;
@@ -781,16 +783,18 @@ again:
781 } 783 }
782 if (start_pos < inode->i_size) { 784 if (start_pos < inode->i_size) {
783 struct btrfs_ordered_extent *ordered; 785 struct btrfs_ordered_extent *ordered;
784 lock_extent(&BTRFS_I(inode)->io_tree, 786 lock_extent_bits(&BTRFS_I(inode)->io_tree,
785 start_pos, last_pos - 1, GFP_NOFS); 787 start_pos, last_pos - 1, 0, &cached_state,
788 GFP_NOFS);
786 ordered = btrfs_lookup_first_ordered_extent(inode, 789 ordered = btrfs_lookup_first_ordered_extent(inode,
787 last_pos - 1); 790 last_pos - 1);
788 if (ordered && 791 if (ordered &&
789 ordered->file_offset + ordered->len > start_pos && 792 ordered->file_offset + ordered->len > start_pos &&
790 ordered->file_offset < last_pos) { 793 ordered->file_offset < last_pos) {
791 btrfs_put_ordered_extent(ordered); 794 btrfs_put_ordered_extent(ordered);
792 unlock_extent(&BTRFS_I(inode)->io_tree, 795 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
793 start_pos, last_pos - 1, GFP_NOFS); 796 start_pos, last_pos - 1,
797 &cached_state, GFP_NOFS);
794 for (i = 0; i < num_pages; i++) { 798 for (i = 0; i < num_pages; i++) {
795 unlock_page(pages[i]); 799 unlock_page(pages[i]);
796 page_cache_release(pages[i]); 800 page_cache_release(pages[i]);
@@ -802,12 +806,13 @@ again:
802 if (ordered) 806 if (ordered)
803 btrfs_put_ordered_extent(ordered); 807 btrfs_put_ordered_extent(ordered);
804 808
805 clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, 809 clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
806 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | 810 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
807 EXTENT_DO_ACCOUNTING, 811 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
808 GFP_NOFS); 812 GFP_NOFS);
809 unlock_extent(&BTRFS_I(inode)->io_tree, 813 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
810 start_pos, last_pos - 1, GFP_NOFS); 814 start_pos, last_pos - 1, &cached_state,
815 GFP_NOFS);
811 } 816 }
812 for (i = 0; i < num_pages; i++) { 817 for (i = 0; i < num_pages; i++) {
813 clear_page_dirty_for_io(pages[i]); 818 clear_page_dirty_for_io(pages[i]);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c41db6d45ab6..02bb099845fd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -379,7 +379,8 @@ again:
379 * change at any time if we discover bad compression ratios. 379 * change at any time if we discover bad compression ratios.
380 */ 380 */
381 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && 381 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
382 btrfs_test_opt(root, COMPRESS)) { 382 (btrfs_test_opt(root, COMPRESS) ||
383 (BTRFS_I(inode)->force_compress))) {
383 WARN_ON(pages); 384 WARN_ON(pages);
384 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); 385 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
385 386
@@ -483,8 +484,10 @@ again:
483 nr_pages_ret = 0; 484 nr_pages_ret = 0;
484 485
485 /* flag the file so we don't compress in the future */ 486 /* flag the file so we don't compress in the future */
486 if (!btrfs_test_opt(root, FORCE_COMPRESS)) 487 if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
488 !(BTRFS_I(inode)->force_compress)) {
487 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; 489 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
490 }
488 } 491 }
489 if (will_compress) { 492 if (will_compress) {
490 *num_added += 1; 493 *num_added += 1;
@@ -570,8 +573,8 @@ retry:
570 unsigned long nr_written = 0; 573 unsigned long nr_written = 0;
571 574
572 lock_extent(io_tree, async_extent->start, 575 lock_extent(io_tree, async_extent->start,
573 async_extent->start + 576 async_extent->start +
574 async_extent->ram_size - 1, GFP_NOFS); 577 async_extent->ram_size - 1, GFP_NOFS);
575 578
576 /* allocate blocks */ 579 /* allocate blocks */
577 ret = cow_file_range(inode, async_cow->locked_page, 580 ret = cow_file_range(inode, async_cow->locked_page,
@@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1211 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) 1214 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1212 ret = run_delalloc_nocow(inode, locked_page, start, end, 1215 ret = run_delalloc_nocow(inode, locked_page, start, end,
1213 page_started, 0, nr_written); 1216 page_started, 0, nr_written);
1214 else if (!btrfs_test_opt(root, COMPRESS)) 1217 else if (!btrfs_test_opt(root, COMPRESS) &&
1218 !(BTRFS_I(inode)->force_compress))
1215 ret = cow_file_range(inode, locked_page, start, end, 1219 ret = cow_file_range(inode, locked_page, start, end,
1216 page_started, nr_written, 1); 1220 page_started, nr_written, 1);
1217 else 1221 else
@@ -1508,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1508 return 0; 1512 return 0;
1509} 1513}
1510 1514
1511int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) 1515int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1516 struct extent_state **cached_state)
1512{ 1517{
1513 if ((end & (PAGE_CACHE_SIZE - 1)) == 0) 1518 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1514 WARN_ON(1); 1519 WARN_ON(1);
1515 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1520 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1516 GFP_NOFS); 1521 cached_state, GFP_NOFS);
1517} 1522}
1518 1523
1519/* see btrfs_writepage_start_hook for details on why this is required */ 1524/* see btrfs_writepage_start_hook for details on why this is required */
@@ -1526,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1526{ 1531{
1527 struct btrfs_writepage_fixup *fixup; 1532 struct btrfs_writepage_fixup *fixup;
1528 struct btrfs_ordered_extent *ordered; 1533 struct btrfs_ordered_extent *ordered;
1534 struct extent_state *cached_state = NULL;
1529 struct page *page; 1535 struct page *page;
1530 struct inode *inode; 1536 struct inode *inode;
1531 u64 page_start; 1537 u64 page_start;
@@ -1544,7 +1550,8 @@ again:
1544 page_start = page_offset(page); 1550 page_start = page_offset(page);
1545 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1551 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1546 1552
1547 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); 1553 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1554 &cached_state, GFP_NOFS);
1548 1555
1549 /* already ordered? We're done */ 1556 /* already ordered? We're done */
1550 if (PagePrivate2(page)) 1557 if (PagePrivate2(page))
@@ -1552,17 +1559,18 @@ again:
1552 1559
1553 ordered = btrfs_lookup_ordered_extent(inode, page_start); 1560 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1554 if (ordered) { 1561 if (ordered) {
1555 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, 1562 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1556 page_end, GFP_NOFS); 1563 page_end, &cached_state, GFP_NOFS);
1557 unlock_page(page); 1564 unlock_page(page);
1558 btrfs_start_ordered_extent(inode, ordered, 1); 1565 btrfs_start_ordered_extent(inode, ordered, 1);
1559 goto again; 1566 goto again;
1560 } 1567 }
1561 1568
1562 btrfs_set_extent_delalloc(inode, page_start, page_end); 1569 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1563 ClearPageChecked(page); 1570 ClearPageChecked(page);
1564out: 1571out:
1565 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); 1572 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1573 &cached_state, GFP_NOFS);
1566out_page: 1574out_page:
1567 unlock_page(page); 1575 unlock_page(page);
1568 page_cache_release(page); 1576 page_cache_release(page);
@@ -1691,14 +1699,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1691 struct btrfs_trans_handle *trans; 1699 struct btrfs_trans_handle *trans;
1692 struct btrfs_ordered_extent *ordered_extent = NULL; 1700 struct btrfs_ordered_extent *ordered_extent = NULL;
1693 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1701 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1702 struct extent_state *cached_state = NULL;
1694 int compressed = 0; 1703 int compressed = 0;
1695 int ret; 1704 int ret;
1696 1705
1697 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); 1706 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1707 end - start + 1);
1698 if (!ret) 1708 if (!ret)
1699 return 0; 1709 return 0;
1700
1701 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1702 BUG_ON(!ordered_extent); 1710 BUG_ON(!ordered_extent);
1703 1711
1704 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1712 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
@@ -1713,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1713 goto out; 1721 goto out;
1714 } 1722 }
1715 1723
1716 lock_extent(io_tree, ordered_extent->file_offset, 1724 lock_extent_bits(io_tree, ordered_extent->file_offset,
1717 ordered_extent->file_offset + ordered_extent->len - 1, 1725 ordered_extent->file_offset + ordered_extent->len - 1,
1718 GFP_NOFS); 1726 0, &cached_state, GFP_NOFS);
1719 1727
1720 trans = btrfs_join_transaction(root, 1); 1728 trans = btrfs_join_transaction(root, 1);
1721 1729
@@ -1742,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1742 ordered_extent->len); 1750 ordered_extent->len);
1743 BUG_ON(ret); 1751 BUG_ON(ret);
1744 } 1752 }
1745 unlock_extent(io_tree, ordered_extent->file_offset, 1753 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1746 ordered_extent->file_offset + ordered_extent->len - 1, 1754 ordered_extent->file_offset +
1747 GFP_NOFS); 1755 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1756
1748 add_pending_csums(trans, inode, ordered_extent->file_offset, 1757 add_pending_csums(trans, inode, ordered_extent->file_offset,
1749 &ordered_extent->list); 1758 &ordered_extent->list);
1750 1759
@@ -2153,7 +2162,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2153 found_key.objectid = found_key.offset; 2162 found_key.objectid = found_key.offset;
2154 found_key.type = BTRFS_INODE_ITEM_KEY; 2163 found_key.type = BTRFS_INODE_ITEM_KEY;
2155 found_key.offset = 0; 2164 found_key.offset = 0;
2156 inode = btrfs_iget(root->fs_info->sb, &found_key, root); 2165 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2157 if (IS_ERR(inode)) 2166 if (IS_ERR(inode))
2158 break; 2167 break;
2159 2168
@@ -3081,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3081 struct btrfs_root *root = BTRFS_I(inode)->root; 3090 struct btrfs_root *root = BTRFS_I(inode)->root;
3082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3091 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3083 struct btrfs_ordered_extent *ordered; 3092 struct btrfs_ordered_extent *ordered;
3093 struct extent_state *cached_state = NULL;
3084 char *kaddr; 3094 char *kaddr;
3085 u32 blocksize = root->sectorsize; 3095 u32 blocksize = root->sectorsize;
3086 pgoff_t index = from >> PAGE_CACHE_SHIFT; 3096 pgoff_t index = from >> PAGE_CACHE_SHIFT;
@@ -3127,12 +3137,14 @@ again:
3127 } 3137 }
3128 wait_on_page_writeback(page); 3138 wait_on_page_writeback(page);
3129 3139
3130 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 3140 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3141 GFP_NOFS);
3131 set_page_extent_mapped(page); 3142 set_page_extent_mapped(page);
3132 3143
3133 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3144 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3134 if (ordered) { 3145 if (ordered) {
3135 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3146 unlock_extent_cached(io_tree, page_start, page_end,
3147 &cached_state, GFP_NOFS);
3136 unlock_page(page); 3148 unlock_page(page);
3137 page_cache_release(page); 3149 page_cache_release(page);
3138 btrfs_start_ordered_extent(inode, ordered, 1); 3150 btrfs_start_ordered_extent(inode, ordered, 1);
@@ -3140,13 +3152,15 @@ again:
3140 goto again; 3152 goto again;
3141 } 3153 }
3142 3154
3143 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 3155 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3144 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 3156 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3145 GFP_NOFS); 3157 0, 0, &cached_state, GFP_NOFS);
3146 3158
3147 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 3159 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3160 &cached_state);
3148 if (ret) { 3161 if (ret) {
3149 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3162 unlock_extent_cached(io_tree, page_start, page_end,
3163 &cached_state, GFP_NOFS);
3150 goto out_unlock; 3164 goto out_unlock;
3151 } 3165 }
3152 3166
@@ -3159,7 +3173,8 @@ again:
3159 } 3173 }
3160 ClearPageChecked(page); 3174 ClearPageChecked(page);
3161 set_page_dirty(page); 3175 set_page_dirty(page);
3162 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3176 unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3177 GFP_NOFS);
3163 3178
3164out_unlock: 3179out_unlock:
3165 if (ret) 3180 if (ret)
@@ -3177,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3177 struct btrfs_root *root = BTRFS_I(inode)->root; 3192 struct btrfs_root *root = BTRFS_I(inode)->root;
3178 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3193 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3179 struct extent_map *em; 3194 struct extent_map *em;
3195 struct extent_state *cached_state = NULL;
3180 u64 mask = root->sectorsize - 1; 3196 u64 mask = root->sectorsize - 1;
3181 u64 hole_start = (inode->i_size + mask) & ~mask; 3197 u64 hole_start = (inode->i_size + mask) & ~mask;
3182 u64 block_end = (size + mask) & ~mask; 3198 u64 block_end = (size + mask) & ~mask;
@@ -3192,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3192 struct btrfs_ordered_extent *ordered; 3208 struct btrfs_ordered_extent *ordered;
3193 btrfs_wait_ordered_range(inode, hole_start, 3209 btrfs_wait_ordered_range(inode, hole_start,
3194 block_end - hole_start); 3210 block_end - hole_start);
3195 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3211 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3212 &cached_state, GFP_NOFS);
3196 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3213 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3197 if (!ordered) 3214 if (!ordered)
3198 break; 3215 break;
3199 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3216 unlock_extent_cached(io_tree, hole_start, block_end - 1,
3217 &cached_state, GFP_NOFS);
3200 btrfs_put_ordered_extent(ordered); 3218 btrfs_put_ordered_extent(ordered);
3201 } 3219 }
3202 3220
@@ -3241,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3241 break; 3259 break;
3242 } 3260 }
3243 3261
3244 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3262 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3263 GFP_NOFS);
3245 return err; 3264 return err;
3246} 3265}
3247 3266
@@ -3639,6 +3658,7 @@ static noinline void init_btrfs_i(struct inode *inode)
3639 bi->index_cnt = (u64)-1; 3658 bi->index_cnt = (u64)-1;
3640 bi->last_unlink_trans = 0; 3659 bi->last_unlink_trans = 0;
3641 bi->ordered_data_close = 0; 3660 bi->ordered_data_close = 0;
3661 bi->force_compress = 0;
3642 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); 3662 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3643 extent_io_tree_init(&BTRFS_I(inode)->io_tree, 3663 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3644 inode->i_mapping, GFP_NOFS); 3664 inode->i_mapping, GFP_NOFS);
@@ -3687,7 +3707,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
3687 * Returns in *is_new if the inode was read from disk 3707 * Returns in *is_new if the inode was read from disk
3688 */ 3708 */
3689struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 3709struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3690 struct btrfs_root *root) 3710 struct btrfs_root *root, int *new)
3691{ 3711{
3692 struct inode *inode; 3712 struct inode *inode;
3693 3713
@@ -3702,6 +3722,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3702 3722
3703 inode_tree_add(inode); 3723 inode_tree_add(inode);
3704 unlock_new_inode(inode); 3724 unlock_new_inode(inode);
3725 if (new)
3726 *new = 1;
3705 } 3727 }
3706 3728
3707 return inode; 3729 return inode;
@@ -3754,7 +3776,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3754 return NULL; 3776 return NULL;
3755 3777
3756 if (location.type == BTRFS_INODE_ITEM_KEY) { 3778 if (location.type == BTRFS_INODE_ITEM_KEY) {
3757 inode = btrfs_iget(dir->i_sb, &location, root); 3779 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
3758 return inode; 3780 return inode;
3759 } 3781 }
3760 3782
@@ -3769,7 +3791,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3769 else 3791 else
3770 inode = new_simple_dir(dir->i_sb, &location, sub_root); 3792 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3771 } else { 3793 } else {
3772 inode = btrfs_iget(dir->i_sb, &location, sub_root); 3794 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
3773 } 3795 }
3774 srcu_read_unlock(&root->fs_info->subvol_srcu, index); 3796 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3775 3797
@@ -4501,7 +4523,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4501 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4523 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4502 if (err) { 4524 if (err) {
4503 err = -ENOSPC; 4525 err = -ENOSPC;
4504 goto out_unlock; 4526 goto out_fail;
4505 } 4527 }
4506 4528
4507 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4529 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
@@ -4979,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4979{ 5001{
4980 struct extent_io_tree *tree; 5002 struct extent_io_tree *tree;
4981 struct btrfs_ordered_extent *ordered; 5003 struct btrfs_ordered_extent *ordered;
5004 struct extent_state *cached_state = NULL;
4982 u64 page_start = page_offset(page); 5005 u64 page_start = page_offset(page);
4983 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 5006 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4984 5007
@@ -4997,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4997 btrfs_releasepage(page, GFP_NOFS); 5020 btrfs_releasepage(page, GFP_NOFS);
4998 return; 5021 return;
4999 } 5022 }
5000 lock_extent(tree, page_start, page_end, GFP_NOFS); 5023 lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
5024 GFP_NOFS);
5001 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 5025 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
5002 page_offset(page)); 5026 page_offset(page));
5003 if (ordered) { 5027 if (ordered) {
@@ -5008,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5008 clear_extent_bit(tree, page_start, page_end, 5032 clear_extent_bit(tree, page_start, page_end,
5009 EXTENT_DIRTY | EXTENT_DELALLOC | 5033 EXTENT_DIRTY | EXTENT_DELALLOC |
5010 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, 5034 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
5011 NULL, GFP_NOFS); 5035 &cached_state, GFP_NOFS);
5012 /* 5036 /*
5013 * whoever cleared the private bit is responsible 5037 * whoever cleared the private bit is responsible
5014 * for the finish_ordered_io 5038 * for the finish_ordered_io
@@ -5018,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5018 page_start, page_end); 5042 page_start, page_end);
5019 } 5043 }
5020 btrfs_put_ordered_extent(ordered); 5044 btrfs_put_ordered_extent(ordered);
5021 lock_extent(tree, page_start, page_end, GFP_NOFS); 5045 cached_state = NULL;
5046 lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
5047 GFP_NOFS);
5022 } 5048 }
5023 clear_extent_bit(tree, page_start, page_end, 5049 clear_extent_bit(tree, page_start, page_end,
5024 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 5050 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5025 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS); 5051 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
5026 __btrfs_releasepage(page, GFP_NOFS); 5052 __btrfs_releasepage(page, GFP_NOFS);
5027 5053
5028 ClearPageChecked(page); 5054 ClearPageChecked(page);
@@ -5055,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5055 struct btrfs_root *root = BTRFS_I(inode)->root; 5081 struct btrfs_root *root = BTRFS_I(inode)->root;
5056 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5057 struct btrfs_ordered_extent *ordered; 5083 struct btrfs_ordered_extent *ordered;
5084 struct extent_state *cached_state = NULL;
5058 char *kaddr; 5085 char *kaddr;
5059 unsigned long zero_start; 5086 unsigned long zero_start;
5060 loff_t size; 5087 loff_t size;
@@ -5093,7 +5120,8 @@ again:
5093 } 5120 }
5094 wait_on_page_writeback(page); 5121 wait_on_page_writeback(page);
5095 5122
5096 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 5123 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
5124 GFP_NOFS);
5097 set_page_extent_mapped(page); 5125 set_page_extent_mapped(page);
5098 5126
5099 /* 5127 /*
@@ -5102,7 +5130,8 @@ again:
5102 */ 5130 */
5103 ordered = btrfs_lookup_ordered_extent(inode, page_start); 5131 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5104 if (ordered) { 5132 if (ordered) {
5105 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5133 unlock_extent_cached(io_tree, page_start, page_end,
5134 &cached_state, GFP_NOFS);
5106 unlock_page(page); 5135 unlock_page(page);
5107 btrfs_start_ordered_extent(inode, ordered, 1); 5136 btrfs_start_ordered_extent(inode, ordered, 1);
5108 btrfs_put_ordered_extent(ordered); 5137 btrfs_put_ordered_extent(ordered);
@@ -5116,13 +5145,15 @@ again:
5116 * is probably a better way to do this, but for now keep consistent with 5145 * is probably a better way to do this, but for now keep consistent with
5117 * prepare_pages in the normal write path. 5146 * prepare_pages in the normal write path.
5118 */ 5147 */
5119 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 5148 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
5120 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 5149 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5121 GFP_NOFS); 5150 0, 0, &cached_state, GFP_NOFS);
5122 5151
5123 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 5152 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
5153 &cached_state);
5124 if (ret) { 5154 if (ret) {
5125 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5155 unlock_extent_cached(io_tree, page_start, page_end,
5156 &cached_state, GFP_NOFS);
5126 ret = VM_FAULT_SIGBUS; 5157 ret = VM_FAULT_SIGBUS;
5127 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); 5158 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5128 goto out_unlock; 5159 goto out_unlock;
@@ -5148,7 +5179,7 @@ again:
5148 BTRFS_I(inode)->last_trans = root->fs_info->generation; 5179 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5149 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 5180 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5150 5181
5151 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5182 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
5152 5183
5153out_unlock: 5184out_unlock:
5154 btrfs_unreserve_metadata_for_delalloc(root, inode, 1); 5185 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
@@ -5827,6 +5858,7 @@ stop_trans:
5827static long btrfs_fallocate(struct inode *inode, int mode, 5858static long btrfs_fallocate(struct inode *inode, int mode,
5828 loff_t offset, loff_t len) 5859 loff_t offset, loff_t len)
5829{ 5860{
5861 struct extent_state *cached_state = NULL;
5830 u64 cur_offset; 5862 u64 cur_offset;
5831 u64 last_byte; 5863 u64 last_byte;
5832 u64 alloc_start; 5864 u64 alloc_start;
@@ -5865,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5865 /* the extent lock is ordered inside the running 5897 /* the extent lock is ordered inside the running
5866 * transaction 5898 * transaction
5867 */ 5899 */
5868 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 5900 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
5869 GFP_NOFS); 5901 locked_end, 0, &cached_state, GFP_NOFS);
5870 ordered = btrfs_lookup_first_ordered_extent(inode, 5902 ordered = btrfs_lookup_first_ordered_extent(inode,
5871 alloc_end - 1); 5903 alloc_end - 1);
5872 if (ordered && 5904 if (ordered &&
5873 ordered->file_offset + ordered->len > alloc_start && 5905 ordered->file_offset + ordered->len > alloc_start &&
5874 ordered->file_offset < alloc_end) { 5906 ordered->file_offset < alloc_end) {
5875 btrfs_put_ordered_extent(ordered); 5907 btrfs_put_ordered_extent(ordered);
5876 unlock_extent(&BTRFS_I(inode)->io_tree, 5908 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
5877 alloc_start, locked_end, GFP_NOFS); 5909 alloc_start, locked_end,
5910 &cached_state, GFP_NOFS);
5878 /* 5911 /*
5879 * we can't wait on the range with the transaction 5912 * we can't wait on the range with the transaction
5880 * running or with the extent lock held 5913 * running or with the extent lock held
@@ -5916,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5916 break; 5949 break;
5917 } 5950 }
5918 } 5951 }
5919 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 5952 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5920 GFP_NOFS); 5953 &cached_state, GFP_NOFS);
5921 5954
5922 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, 5955 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
5923 alloc_end - alloc_start); 5956 alloc_end - alloc_start);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 645a17927a8f..2845c6ceecd2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -48,6 +48,7 @@
48#include "print-tree.h" 48#include "print-tree.h"
49#include "volumes.h" 49#include "volumes.h"
50#include "locking.h" 50#include "locking.h"
51#include "ctree.h"
51 52
52/* Mask out flags that are inappropriate for the given type of inode. */ 53/* Mask out flags that are inappropriate for the given type of inode. */
53static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 54static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -474,7 +475,79 @@ out_unlock:
474 return error; 475 return error;
475} 476}
476 477
477static int btrfs_defrag_file(struct file *file) 478static int should_defrag_range(struct inode *inode, u64 start, u64 len,
479 int thresh, u64 *last_len, u64 *skip,
480 u64 *defrag_end)
481{
482 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
483 struct extent_map *em = NULL;
484 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
485 int ret = 1;
486
487
488 if (thresh == 0)
489 thresh = 256 * 1024;
490
491 /*
492 * make sure that once we start defragging and extent, we keep on
493 * defragging it
494 */
495 if (start < *defrag_end)
496 return 1;
497
498 *skip = 0;
499
500 /*
501 * hopefully we have this extent in the tree already, try without
502 * the full extent lock
503 */
504 read_lock(&em_tree->lock);
505 em = lookup_extent_mapping(em_tree, start, len);
506 read_unlock(&em_tree->lock);
507
508 if (!em) {
509 /* get the big lock and read metadata off disk */
510 lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
511 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
512 unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
513
514 if (!em)
515 return 0;
516 }
517
518 /* this will cover holes, and inline extents */
519 if (em->block_start >= EXTENT_MAP_LAST_BYTE)
520 ret = 0;
521
522 /*
523 * we hit a real extent, if it is big don't bother defragging it again
524 */
525 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
526 ret = 0;
527
528 /*
529 * last_len ends up being a counter of how many bytes we've defragged.
530 * every time we choose not to defrag an extent, we reset *last_len
531 * so that the next tiny extent will force a defrag.
532 *
533 * The end result of this is that tiny extents before a single big
534 * extent will force at least part of that big extent to be defragged.
535 */
536 if (ret) {
537 *last_len += len;
538 *defrag_end = extent_map_end(em);
539 } else {
540 *last_len = 0;
541 *skip = extent_map_end(em);
542 *defrag_end = 0;
543 }
544
545 free_extent_map(em);
546 return ret;
547}
548
549static int btrfs_defrag_file(struct file *file,
550 struct btrfs_ioctl_defrag_range_args *range)
478{ 551{
479 struct inode *inode = fdentry(file)->d_inode; 552 struct inode *inode = fdentry(file)->d_inode;
480 struct btrfs_root *root = BTRFS_I(inode)->root; 553 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -486,37 +559,96 @@ static int btrfs_defrag_file(struct file *file)
486 unsigned long total_read = 0; 559 unsigned long total_read = 0;
487 u64 page_start; 560 u64 page_start;
488 u64 page_end; 561 u64 page_end;
562 u64 last_len = 0;
563 u64 skip = 0;
564 u64 defrag_end = 0;
489 unsigned long i; 565 unsigned long i;
490 int ret; 566 int ret;
491 567
492 ret = btrfs_check_data_free_space(root, inode, inode->i_size); 568 if (inode->i_size == 0)
493 if (ret) 569 return 0;
494 return -ENOSPC; 570
571 if (range->start + range->len > range->start) {
572 last_index = min_t(u64, inode->i_size - 1,
573 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
574 } else {
575 last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
576 }
577
578 i = range->start >> PAGE_CACHE_SHIFT;
579 while (i <= last_index) {
580 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
581 PAGE_CACHE_SIZE,
582 range->extent_thresh,
583 &last_len, &skip,
584 &defrag_end)) {
585 unsigned long next;
586 /*
587 * the should_defrag function tells us how much to skip
588 * bump our counter by the suggested amount
589 */
590 next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
591 i = max(i + 1, next);
592 continue;
593 }
495 594
496 mutex_lock(&inode->i_mutex);
497 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
498 for (i = 0; i <= last_index; i++) {
499 if (total_read % ra_pages == 0) { 595 if (total_read % ra_pages == 0) {
500 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, 596 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
501 min(last_index, i + ra_pages - 1)); 597 min(last_index, i + ra_pages - 1));
502 } 598 }
503 total_read++; 599 total_read++;
600 mutex_lock(&inode->i_mutex);
601 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
602 BTRFS_I(inode)->force_compress = 1;
603
604 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
605 if (ret) {
606 ret = -ENOSPC;
607 break;
608 }
609
610 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
611 if (ret) {
612 btrfs_free_reserved_data_space(root, inode,
613 PAGE_CACHE_SIZE);
614 ret = -ENOSPC;
615 break;
616 }
504again: 617again:
618 if (inode->i_size == 0 ||
619 i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
620 ret = 0;
621 goto err_reservations;
622 }
623
505 page = grab_cache_page(inode->i_mapping, i); 624 page = grab_cache_page(inode->i_mapping, i);
506 if (!page) 625 if (!page)
507 goto out_unlock; 626 goto err_reservations;
627
508 if (!PageUptodate(page)) { 628 if (!PageUptodate(page)) {
509 btrfs_readpage(NULL, page); 629 btrfs_readpage(NULL, page);
510 lock_page(page); 630 lock_page(page);
511 if (!PageUptodate(page)) { 631 if (!PageUptodate(page)) {
512 unlock_page(page); 632 unlock_page(page);
513 page_cache_release(page); 633 page_cache_release(page);
514 goto out_unlock; 634 goto err_reservations;
515 } 635 }
516 } 636 }
517 637
638 if (page->mapping != inode->i_mapping) {
639 unlock_page(page);
640 page_cache_release(page);
641 goto again;
642 }
643
518 wait_on_page_writeback(page); 644 wait_on_page_writeback(page);
519 645
646 if (PageDirty(page)) {
647 btrfs_free_reserved_data_space(root, inode,
648 PAGE_CACHE_SIZE);
649 goto loop_unlock;
650 }
651
520 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 652 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
521 page_end = page_start + PAGE_CACHE_SIZE - 1; 653 page_end = page_start + PAGE_CACHE_SIZE - 1;
522 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 654 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
@@ -537,18 +669,54 @@ again:
537 * page if it is dirtied again later 669 * page if it is dirtied again later
538 */ 670 */
539 clear_page_dirty_for_io(page); 671 clear_page_dirty_for_io(page);
672 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
673 page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
674 EXTENT_DO_ACCOUNTING, GFP_NOFS);
540 675
541 btrfs_set_extent_delalloc(inode, page_start, page_end); 676 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
677 ClearPageChecked(page);
542 set_page_dirty(page); 678 set_page_dirty(page);
543 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 679 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
680
681loop_unlock:
544 unlock_page(page); 682 unlock_page(page);
545 page_cache_release(page); 683 page_cache_release(page);
684 mutex_unlock(&inode->i_mutex);
685
686 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
546 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); 687 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
688 i++;
689 }
690
691 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
692 filemap_flush(inode->i_mapping);
693
694 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
695 /* the filemap_flush will queue IO into the worker threads, but
696 * we have to make sure the IO is actually started and that
697 * ordered extents get created before we return
698 */
699 atomic_inc(&root->fs_info->async_submit_draining);
700 while (atomic_read(&root->fs_info->nr_async_submits) ||
701 atomic_read(&root->fs_info->async_delalloc_pages)) {
702 wait_event(root->fs_info->async_submit_wait,
703 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
704 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
705 }
706 atomic_dec(&root->fs_info->async_submit_draining);
707
708 mutex_lock(&inode->i_mutex);
709 BTRFS_I(inode)->force_compress = 0;
710 mutex_unlock(&inode->i_mutex);
547 } 711 }
548 712
549out_unlock:
550 mutex_unlock(&inode->i_mutex);
551 return 0; 713 return 0;
714
715err_reservations:
716 mutex_unlock(&inode->i_mutex);
717 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
718 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
719 return ret;
552} 720}
553 721
554static noinline int btrfs_ioctl_resize(struct btrfs_root *root, 722static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
@@ -608,7 +776,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
608 mod = 1; 776 mod = 1;
609 sizestr++; 777 sizestr++;
610 } 778 }
611 new_size = btrfs_parse_size(sizestr); 779 new_size = memparse(sizestr, NULL);
612 if (new_size == 0) { 780 if (new_size == 0) {
613 ret = -EINVAL; 781 ret = -EINVAL;
614 goto out_unlock; 782 goto out_unlock;
@@ -743,6 +911,327 @@ out:
743 return ret; 911 return ret;
744} 912}
745 913
914static noinline int key_in_sk(struct btrfs_key *key,
915 struct btrfs_ioctl_search_key *sk)
916{
917 struct btrfs_key test;
918 int ret;
919
920 test.objectid = sk->min_objectid;
921 test.type = sk->min_type;
922 test.offset = sk->min_offset;
923
924 ret = btrfs_comp_cpu_keys(key, &test);
925 if (ret < 0)
926 return 0;
927
928 test.objectid = sk->max_objectid;
929 test.type = sk->max_type;
930 test.offset = sk->max_offset;
931
932 ret = btrfs_comp_cpu_keys(key, &test);
933 if (ret > 0)
934 return 0;
935 return 1;
936}
937
938static noinline int copy_to_sk(struct btrfs_root *root,
939 struct btrfs_path *path,
940 struct btrfs_key *key,
941 struct btrfs_ioctl_search_key *sk,
942 char *buf,
943 unsigned long *sk_offset,
944 int *num_found)
945{
946 u64 found_transid;
947 struct extent_buffer *leaf;
948 struct btrfs_ioctl_search_header sh;
949 unsigned long item_off;
950 unsigned long item_len;
951 int nritems;
952 int i;
953 int slot;
954 int found = 0;
955 int ret = 0;
956
957 leaf = path->nodes[0];
958 slot = path->slots[0];
959 nritems = btrfs_header_nritems(leaf);
960
961 if (btrfs_header_generation(leaf) > sk->max_transid) {
962 i = nritems;
963 goto advance_key;
964 }
965 found_transid = btrfs_header_generation(leaf);
966
967 for (i = slot; i < nritems; i++) {
968 item_off = btrfs_item_ptr_offset(leaf, i);
969 item_len = btrfs_item_size_nr(leaf, i);
970
971 if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
972 item_len = 0;
973
974 if (sizeof(sh) + item_len + *sk_offset >
975 BTRFS_SEARCH_ARGS_BUFSIZE) {
976 ret = 1;
977 goto overflow;
978 }
979
980 btrfs_item_key_to_cpu(leaf, key, i);
981 if (!key_in_sk(key, sk))
982 continue;
983
984 sh.objectid = key->objectid;
985 sh.offset = key->offset;
986 sh.type = key->type;
987 sh.len = item_len;
988 sh.transid = found_transid;
989
990 /* copy search result header */
991 memcpy(buf + *sk_offset, &sh, sizeof(sh));
992 *sk_offset += sizeof(sh);
993
994 if (item_len) {
995 char *p = buf + *sk_offset;
996 /* copy the item */
997 read_extent_buffer(leaf, p,
998 item_off, item_len);
999 *sk_offset += item_len;
1000 }
1001 found++;
1002
1003 if (*num_found >= sk->nr_items)
1004 break;
1005 }
1006advance_key:
1007 ret = 0;
1008 if (key->offset < (u64)-1 && key->offset < sk->max_offset)
1009 key->offset++;
1010 else if (key->type < (u8)-1 && key->type < sk->max_type) {
1011 key->offset = 0;
1012 key->type++;
1013 } else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
1014 key->offset = 0;
1015 key->type = 0;
1016 key->objectid++;
1017 } else
1018 ret = 1;
1019overflow:
1020 *num_found += found;
1021 return ret;
1022}
1023
1024static noinline int search_ioctl(struct inode *inode,
1025 struct btrfs_ioctl_search_args *args)
1026{
1027 struct btrfs_root *root;
1028 struct btrfs_key key;
1029 struct btrfs_key max_key;
1030 struct btrfs_path *path;
1031 struct btrfs_ioctl_search_key *sk = &args->key;
1032 struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
1033 int ret;
1034 int num_found = 0;
1035 unsigned long sk_offset = 0;
1036
1037 path = btrfs_alloc_path();
1038 if (!path)
1039 return -ENOMEM;
1040
1041 if (sk->tree_id == 0) {
1042 /* search the root of the inode that was passed */
1043 root = BTRFS_I(inode)->root;
1044 } else {
1045 key.objectid = sk->tree_id;
1046 key.type = BTRFS_ROOT_ITEM_KEY;
1047 key.offset = (u64)-1;
1048 root = btrfs_read_fs_root_no_name(info, &key);
1049 if (IS_ERR(root)) {
1050 printk(KERN_ERR "could not find root %llu\n",
1051 sk->tree_id);
1052 btrfs_free_path(path);
1053 return -ENOENT;
1054 }
1055 }
1056
1057 key.objectid = sk->min_objectid;
1058 key.type = sk->min_type;
1059 key.offset = sk->min_offset;
1060
1061 max_key.objectid = sk->max_objectid;
1062 max_key.type = sk->max_type;
1063 max_key.offset = sk->max_offset;
1064
1065 path->keep_locks = 1;
1066
1067 while(1) {
1068 ret = btrfs_search_forward(root, &key, &max_key, path, 0,
1069 sk->min_transid);
1070 if (ret != 0) {
1071 if (ret > 0)
1072 ret = 0;
1073 goto err;
1074 }
1075 ret = copy_to_sk(root, path, &key, sk, args->buf,
1076 &sk_offset, &num_found);
1077 btrfs_release_path(root, path);
1078 if (ret || num_found >= sk->nr_items)
1079 break;
1080
1081 }
1082 ret = 0;
1083err:
1084 sk->nr_items = num_found;
1085 btrfs_free_path(path);
1086 return ret;
1087}
1088
1089static noinline int btrfs_ioctl_tree_search(struct file *file,
1090 void __user *argp)
1091{
1092 struct btrfs_ioctl_search_args *args;
1093 struct inode *inode;
1094 int ret;
1095
1096 if (!capable(CAP_SYS_ADMIN))
1097 return -EPERM;
1098
1099 args = kmalloc(sizeof(*args), GFP_KERNEL);
1100 if (!args)
1101 return -ENOMEM;
1102
1103 if (copy_from_user(args, argp, sizeof(*args))) {
1104 kfree(args);
1105 return -EFAULT;
1106 }
1107 inode = fdentry(file)->d_inode;
1108 ret = search_ioctl(inode, args);
1109 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
1110 ret = -EFAULT;
1111 kfree(args);
1112 return ret;
1113}
1114
1115/*
1116 * Search INODE_REFs to identify path name of 'dirid' directory
1117 * in a 'tree_id' tree. and sets path name to 'name'.
1118 */
1119static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
1120 u64 tree_id, u64 dirid, char *name)
1121{
1122 struct btrfs_root *root;
1123 struct btrfs_key key;
1124 char *ptr;
1125 int ret = -1;
1126 int slot;
1127 int len;
1128 int total_len = 0;
1129 struct btrfs_inode_ref *iref;
1130 struct extent_buffer *l;
1131 struct btrfs_path *path;
1132
1133 if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
1134 name[0]='\0';
1135 return 0;
1136 }
1137
1138 path = btrfs_alloc_path();
1139 if (!path)
1140 return -ENOMEM;
1141
1142 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
1143
1144 key.objectid = tree_id;
1145 key.type = BTRFS_ROOT_ITEM_KEY;
1146 key.offset = (u64)-1;
1147 root = btrfs_read_fs_root_no_name(info, &key);
1148 if (IS_ERR(root)) {
1149 printk(KERN_ERR "could not find root %llu\n", tree_id);
1150 ret = -ENOENT;
1151 goto out;
1152 }
1153
1154 key.objectid = dirid;
1155 key.type = BTRFS_INODE_REF_KEY;
1156 key.offset = (u64)-1;
1157
1158 while(1) {
1159 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1160 if (ret < 0)
1161 goto out;
1162
1163 l = path->nodes[0];
1164 slot = path->slots[0];
1165 if (ret > 0 && slot > 0)
1166 slot--;
1167 btrfs_item_key_to_cpu(l, &key, slot);
1168
1169 if (ret > 0 && (key.objectid != dirid ||
1170 key.type != BTRFS_INODE_REF_KEY)) {
1171 ret = -ENOENT;
1172 goto out;
1173 }
1174
1175 iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
1176 len = btrfs_inode_ref_name_len(l, iref);
1177 ptr -= len + 1;
1178 total_len += len + 1;
1179 if (ptr < name)
1180 goto out;
1181
1182 *(ptr + len) = '/';
1183 read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len);
1184
1185 if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
1186 break;
1187
1188 btrfs_release_path(root, path);
1189 key.objectid = key.offset;
1190 key.offset = (u64)-1;
1191 dirid = key.objectid;
1192
1193 }
1194 if (ptr < name)
1195 goto out;
1196 memcpy(name, ptr, total_len);
1197 name[total_len]='\0';
1198 ret = 0;
1199out:
1200 btrfs_free_path(path);
1201 return ret;
1202}
1203
1204static noinline int btrfs_ioctl_ino_lookup(struct file *file,
1205 void __user *argp)
1206{
1207 struct btrfs_ioctl_ino_lookup_args *args;
1208 struct inode *inode;
1209 int ret;
1210
1211 if (!capable(CAP_SYS_ADMIN))
1212 return -EPERM;
1213
1214 args = kmalloc(sizeof(*args), GFP_KERNEL);
1215 if (copy_from_user(args, argp, sizeof(*args))) {
1216 kfree(args);
1217 return -EFAULT;
1218 }
1219 inode = fdentry(file)->d_inode;
1220
1221 if (args->treeid == 0)
1222 args->treeid = BTRFS_I(inode)->root->root_key.objectid;
1223
1224 ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
1225 args->treeid, args->objectid,
1226 args->name);
1227
1228 if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
1229 ret = -EFAULT;
1230
1231 kfree(args);
1232 return ret;
1233}
1234
746static noinline int btrfs_ioctl_snap_destroy(struct file *file, 1235static noinline int btrfs_ioctl_snap_destroy(struct file *file,
747 void __user *arg) 1236 void __user *arg)
748{ 1237{
@@ -849,10 +1338,11 @@ out:
849 return err; 1338 return err;
850} 1339}
851 1340
852static int btrfs_ioctl_defrag(struct file *file) 1341static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
853{ 1342{
854 struct inode *inode = fdentry(file)->d_inode; 1343 struct inode *inode = fdentry(file)->d_inode;
855 struct btrfs_root *root = BTRFS_I(inode)->root; 1344 struct btrfs_root *root = BTRFS_I(inode)->root;
1345 struct btrfs_ioctl_defrag_range_args *range;
856 int ret; 1346 int ret;
857 1347
858 ret = mnt_want_write(file->f_path.mnt); 1348 ret = mnt_want_write(file->f_path.mnt);
@@ -873,7 +1363,30 @@ static int btrfs_ioctl_defrag(struct file *file)
873 ret = -EINVAL; 1363 ret = -EINVAL;
874 goto out; 1364 goto out;
875 } 1365 }
876 btrfs_defrag_file(file); 1366
1367 range = kzalloc(sizeof(*range), GFP_KERNEL);
1368 if (!range) {
1369 ret = -ENOMEM;
1370 goto out;
1371 }
1372
1373 if (argp) {
1374 if (copy_from_user(range, argp,
1375 sizeof(*range))) {
1376 ret = -EFAULT;
1377 kfree(range);
1378 }
1379 /* compression requires us to start the IO */
1380 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
1381 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
1382 range->extent_thresh = (u32)-1;
1383 }
1384 } else {
1385 /* the rest are all set to zero by kzalloc */
1386 range->len = (u64)-1;
1387 }
1388 btrfs_defrag_file(file, range);
1389 kfree(range);
877 break; 1390 break;
878 } 1391 }
879out: 1392out:
@@ -1274,6 +1787,157 @@ out:
1274 return ret; 1787 return ret;
1275} 1788}
1276 1789
1790static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
1791{
1792 struct inode *inode = fdentry(file)->d_inode;
1793 struct btrfs_root *root = BTRFS_I(inode)->root;
1794 struct btrfs_root *new_root;
1795 struct btrfs_dir_item *di;
1796 struct btrfs_trans_handle *trans;
1797 struct btrfs_path *path;
1798 struct btrfs_key location;
1799 struct btrfs_disk_key disk_key;
1800 struct btrfs_super_block *disk_super;
1801 u64 features;
1802 u64 objectid = 0;
1803 u64 dir_id;
1804
1805 if (!capable(CAP_SYS_ADMIN))
1806 return -EPERM;
1807
1808 if (copy_from_user(&objectid, argp, sizeof(objectid)))
1809 return -EFAULT;
1810
1811 if (!objectid)
1812 objectid = root->root_key.objectid;
1813
1814 location.objectid = objectid;
1815 location.type = BTRFS_ROOT_ITEM_KEY;
1816 location.offset = (u64)-1;
1817
1818 new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
1819 if (IS_ERR(new_root))
1820 return PTR_ERR(new_root);
1821
1822 if (btrfs_root_refs(&new_root->root_item) == 0)
1823 return -ENOENT;
1824
1825 path = btrfs_alloc_path();
1826 if (!path)
1827 return -ENOMEM;
1828 path->leave_spinning = 1;
1829
1830 trans = btrfs_start_transaction(root, 1);
1831 if (!trans) {
1832 btrfs_free_path(path);
1833 return -ENOMEM;
1834 }
1835
1836 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
1837 di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
1838 dir_id, "default", 7, 1);
1839 if (!di) {
1840 btrfs_free_path(path);
1841 btrfs_end_transaction(trans, root);
1842 printk(KERN_ERR "Umm, you don't have the default dir item, "
1843 "this isn't going to work\n");
1844 return -ENOENT;
1845 }
1846
1847 btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
1848 btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
1849 btrfs_mark_buffer_dirty(path->nodes[0]);
1850 btrfs_free_path(path);
1851
1852 disk_super = &root->fs_info->super_copy;
1853 features = btrfs_super_incompat_flags(disk_super);
1854 if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
1855 features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
1856 btrfs_set_super_incompat_flags(disk_super, features);
1857 }
1858 btrfs_end_transaction(trans, root);
1859
1860 return 0;
1861}
1862
1863long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
1864{
1865 struct btrfs_ioctl_space_args space_args;
1866 struct btrfs_ioctl_space_info space;
1867 struct btrfs_ioctl_space_info *dest;
1868 struct btrfs_ioctl_space_info *dest_orig;
1869 struct btrfs_ioctl_space_info *user_dest;
1870 struct btrfs_space_info *info;
1871 int alloc_size;
1872 int ret = 0;
1873 int slot_count = 0;
1874
1875 if (copy_from_user(&space_args,
1876 (struct btrfs_ioctl_space_args __user *)arg,
1877 sizeof(space_args)))
1878 return -EFAULT;
1879
1880 /* first we count slots */
1881 rcu_read_lock();
1882 list_for_each_entry_rcu(info, &root->fs_info->space_info, list)
1883 slot_count++;
1884 rcu_read_unlock();
1885
1886 /* space_slots == 0 means they are asking for a count */
1887 if (space_args.space_slots == 0) {
1888 space_args.total_spaces = slot_count;
1889 goto out;
1890 }
1891 alloc_size = sizeof(*dest) * slot_count;
1892 /* we generally have at most 6 or so space infos, one for each raid
1893 * level. So, a whole page should be more than enough for everyone
1894 */
1895 if (alloc_size > PAGE_CACHE_SIZE)
1896 return -ENOMEM;
1897
1898 space_args.total_spaces = 0;
1899 dest = kmalloc(alloc_size, GFP_NOFS);
1900 if (!dest)
1901 return -ENOMEM;
1902 dest_orig = dest;
1903
1904 /* now we have a buffer to copy into */
1905 rcu_read_lock();
1906 list_for_each_entry_rcu(info, &root->fs_info->space_info, list) {
1907 /* make sure we don't copy more than we allocated
1908 * in our buffer
1909 */
1910 if (slot_count == 0)
1911 break;
1912 slot_count--;
1913
1914 /* make sure userland has enough room in their buffer */
1915 if (space_args.total_spaces >= space_args.space_slots)
1916 break;
1917
1918 space.flags = info->flags;
1919 space.total_bytes = info->total_bytes;
1920 space.used_bytes = info->bytes_used;
1921 memcpy(dest, &space, sizeof(space));
1922 dest++;
1923 space_args.total_spaces++;
1924 }
1925 rcu_read_unlock();
1926
1927 user_dest = (struct btrfs_ioctl_space_info *)
1928 (arg + sizeof(struct btrfs_ioctl_space_args));
1929
1930 if (copy_to_user(user_dest, dest_orig, alloc_size))
1931 ret = -EFAULT;
1932
1933 kfree(dest_orig);
1934out:
1935 if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
1936 ret = -EFAULT;
1937
1938 return ret;
1939}
1940
1277/* 1941/*
1278 * there are many ways the trans_start and trans_end ioctls can lead 1942 * there are many ways the trans_start and trans_end ioctls can lead
1279 * to deadlocks. They should only be used by applications that 1943 * to deadlocks. They should only be used by applications that
@@ -1320,8 +1984,12 @@ long btrfs_ioctl(struct file *file, unsigned int
1320 return btrfs_ioctl_snap_create(file, argp, 1); 1984 return btrfs_ioctl_snap_create(file, argp, 1);
1321 case BTRFS_IOC_SNAP_DESTROY: 1985 case BTRFS_IOC_SNAP_DESTROY:
1322 return btrfs_ioctl_snap_destroy(file, argp); 1986 return btrfs_ioctl_snap_destroy(file, argp);
1987 case BTRFS_IOC_DEFAULT_SUBVOL:
1988 return btrfs_ioctl_default_subvol(file, argp);
1323 case BTRFS_IOC_DEFRAG: 1989 case BTRFS_IOC_DEFRAG:
1324 return btrfs_ioctl_defrag(file); 1990 return btrfs_ioctl_defrag(file, NULL);
1991 case BTRFS_IOC_DEFRAG_RANGE:
1992 return btrfs_ioctl_defrag(file, argp);
1325 case BTRFS_IOC_RESIZE: 1993 case BTRFS_IOC_RESIZE:
1326 return btrfs_ioctl_resize(root, argp); 1994 return btrfs_ioctl_resize(root, argp);
1327 case BTRFS_IOC_ADD_DEV: 1995 case BTRFS_IOC_ADD_DEV:
@@ -1338,6 +2006,12 @@ long btrfs_ioctl(struct file *file, unsigned int
1338 return btrfs_ioctl_trans_start(file); 2006 return btrfs_ioctl_trans_start(file);
1339 case BTRFS_IOC_TRANS_END: 2007 case BTRFS_IOC_TRANS_END:
1340 return btrfs_ioctl_trans_end(file); 2008 return btrfs_ioctl_trans_end(file);
2009 case BTRFS_IOC_TREE_SEARCH:
2010 return btrfs_ioctl_tree_search(file, argp);
2011 case BTRFS_IOC_INO_LOOKUP:
2012 return btrfs_ioctl_ino_lookup(file, argp);
2013 case BTRFS_IOC_SPACE_INFO:
2014 return btrfs_ioctl_space_info(root, argp);
1341 case BTRFS_IOC_SYNC: 2015 case BTRFS_IOC_SYNC:
1342 btrfs_sync_fs(file->f_dentry->d_sb, 1); 2016 btrfs_sync_fs(file->f_dentry->d_sb, 1);
1343 return 0; 2017 return 0;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index bc49914475eb..424694aa517f 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -30,12 +30,114 @@ struct btrfs_ioctl_vol_args {
30 char name[BTRFS_PATH_NAME_MAX + 1]; 30 char name[BTRFS_PATH_NAME_MAX + 1];
31}; 31};
32 32
33#define BTRFS_INO_LOOKUP_PATH_MAX 4080
34struct btrfs_ioctl_ino_lookup_args {
35 __u64 treeid;
36 __u64 objectid;
37 char name[BTRFS_INO_LOOKUP_PATH_MAX];
38};
39
40struct btrfs_ioctl_search_key {
41 /* which root are we searching. 0 is the tree of tree roots */
42 __u64 tree_id;
43
44 /* keys returned will be >= min and <= max */
45 __u64 min_objectid;
46 __u64 max_objectid;
47
48 /* keys returned will be >= min and <= max */
49 __u64 min_offset;
50 __u64 max_offset;
51
52 /* max and min transids to search for */
53 __u64 min_transid;
54 __u64 max_transid;
55
56 /* keys returned will be >= min and <= max */
57 __u32 min_type;
58 __u32 max_type;
59
60 /*
61 * how many items did userland ask for, and how many are we
62 * returning
63 */
64 __u32 nr_items;
65
66 /* align to 64 bits */
67 __u32 unused;
68
69 /* some extra for later */
70 __u64 unused1;
71 __u64 unused2;
72 __u64 unused3;
73 __u64 unused4;
74};
75
76struct btrfs_ioctl_search_header {
77 __u64 transid;
78 __u64 objectid;
79 __u64 offset;
80 __u32 type;
81 __u32 len;
82};
83
84#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
85/*
86 * the buf is an array of search headers where
87 * each header is followed by the actual item
88 * the type field is expanded to 32 bits for alignment
89 */
90struct btrfs_ioctl_search_args {
91 struct btrfs_ioctl_search_key key;
92 char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
93};
94
33struct btrfs_ioctl_clone_range_args { 95struct btrfs_ioctl_clone_range_args {
34 __s64 src_fd; 96 __s64 src_fd;
35 __u64 src_offset, src_length; 97 __u64 src_offset, src_length;
36 __u64 dest_offset; 98 __u64 dest_offset;
37}; 99};
38 100
101/* flags for the defrag range ioctl */
102#define BTRFS_DEFRAG_RANGE_COMPRESS 1
103#define BTRFS_DEFRAG_RANGE_START_IO 2
104
105struct btrfs_ioctl_defrag_range_args {
106 /* start of the defrag operation */
107 __u64 start;
108
109 /* number of bytes to defrag, use (u64)-1 to say all */
110 __u64 len;
111
112 /*
113 * flags for the operation, which can include turning
114 * on compression for this one defrag
115 */
116 __u64 flags;
117
118 /*
119 * any extent bigger than this will be considered
120 * already defragged. Use 0 to take the kernel default
121 * Use 1 to say every single extent must be rewritten
122 */
123 __u32 extent_thresh;
124
125 /* spare for later */
126 __u32 unused[5];
127};
128
129struct btrfs_ioctl_space_info {
130 __u64 flags;
131 __u64 total_bytes;
132 __u64 used_bytes;
133};
134
135struct btrfs_ioctl_space_args {
136 __u64 space_slots;
137 __u64 total_spaces;
138 struct btrfs_ioctl_space_info spaces[0];
139};
140
39#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ 141#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
40 struct btrfs_ioctl_vol_args) 142 struct btrfs_ioctl_vol_args)
41#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ 143#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -67,4 +169,13 @@ struct btrfs_ioctl_clone_range_args {
67 struct btrfs_ioctl_vol_args) 169 struct btrfs_ioctl_vol_args)
68#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ 170#define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
69 struct btrfs_ioctl_vol_args) 171 struct btrfs_ioctl_vol_args)
172#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \
173 struct btrfs_ioctl_defrag_range_args)
174#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
175 struct btrfs_ioctl_search_args)
176#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \
177 struct btrfs_ioctl_ino_lookup_args)
178#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64)
179#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
180 struct btrfs_ioctl_space_args)
70#endif 181#endif
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 5c2a9e78a949..a8ffecd0b491 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
174 if (!entry) 174 if (!entry)
175 return -ENOMEM; 175 return -ENOMEM;
176 176
177 mutex_lock(&tree->mutex);
178 entry->file_offset = file_offset; 177 entry->file_offset = file_offset;
179 entry->start = start; 178 entry->start = start;
180 entry->len = len; 179 entry->len = len;
@@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
190 INIT_LIST_HEAD(&entry->list); 189 INIT_LIST_HEAD(&entry->list);
191 INIT_LIST_HEAD(&entry->root_extent_list); 190 INIT_LIST_HEAD(&entry->root_extent_list);
192 191
192 spin_lock(&tree->lock);
193 node = tree_insert(&tree->tree, file_offset, 193 node = tree_insert(&tree->tree, file_offset,
194 &entry->rb_node); 194 &entry->rb_node);
195 BUG_ON(node); 195 BUG_ON(node);
196 spin_unlock(&tree->lock);
196 197
197 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 198 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
198 list_add_tail(&entry->root_extent_list, 199 list_add_tail(&entry->root_extent_list,
199 &BTRFS_I(inode)->root->fs_info->ordered_extents); 200 &BTRFS_I(inode)->root->fs_info->ordered_extents);
200 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 201 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
201 202
202 mutex_unlock(&tree->mutex);
203 BUG_ON(node); 203 BUG_ON(node);
204 return 0; 204 return 0;
205} 205}
@@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode,
216 struct btrfs_ordered_inode_tree *tree; 216 struct btrfs_ordered_inode_tree *tree;
217 217
218 tree = &BTRFS_I(inode)->ordered_tree; 218 tree = &BTRFS_I(inode)->ordered_tree;
219 mutex_lock(&tree->mutex); 219 spin_lock(&tree->lock);
220 list_add_tail(&sum->list, &entry->list); 220 list_add_tail(&sum->list, &entry->list);
221 mutex_unlock(&tree->mutex); 221 spin_unlock(&tree->lock);
222 return 0; 222 return 0;
223} 223}
224 224
@@ -232,15 +232,16 @@ int btrfs_add_ordered_sum(struct inode *inode,
232 * to make sure this function only returns 1 once for a given ordered extent. 232 * to make sure this function only returns 1 once for a given ordered extent.
233 */ 233 */
234int btrfs_dec_test_ordered_pending(struct inode *inode, 234int btrfs_dec_test_ordered_pending(struct inode *inode,
235 struct btrfs_ordered_extent **cached,
235 u64 file_offset, u64 io_size) 236 u64 file_offset, u64 io_size)
236{ 237{
237 struct btrfs_ordered_inode_tree *tree; 238 struct btrfs_ordered_inode_tree *tree;
238 struct rb_node *node; 239 struct rb_node *node;
239 struct btrfs_ordered_extent *entry; 240 struct btrfs_ordered_extent *entry = NULL;
240 int ret; 241 int ret;
241 242
242 tree = &BTRFS_I(inode)->ordered_tree; 243 tree = &BTRFS_I(inode)->ordered_tree;
243 mutex_lock(&tree->mutex); 244 spin_lock(&tree->lock);
244 node = tree_search(tree, file_offset); 245 node = tree_search(tree, file_offset);
245 if (!node) { 246 if (!node) {
246 ret = 1; 247 ret = 1;
@@ -264,7 +265,11 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
264 else 265 else
265 ret = 1; 266 ret = 1;
266out: 267out:
267 mutex_unlock(&tree->mutex); 268 if (!ret && cached && entry) {
269 *cached = entry;
270 atomic_inc(&entry->refs);
271 }
272 spin_unlock(&tree->lock);
268 return ret == 0; 273 return ret == 0;
269} 274}
270 275
@@ -291,7 +296,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
291 296
292/* 297/*
293 * remove an ordered extent from the tree. No references are dropped 298 * remove an ordered extent from the tree. No references are dropped
294 * and you must wake_up entry->wait. You must hold the tree mutex 299 * and you must wake_up entry->wait. You must hold the tree lock
295 * while you call this function. 300 * while you call this function.
296 */ 301 */
297static int __btrfs_remove_ordered_extent(struct inode *inode, 302static int __btrfs_remove_ordered_extent(struct inode *inode,
@@ -340,9 +345,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
340 int ret; 345 int ret;
341 346
342 tree = &BTRFS_I(inode)->ordered_tree; 347 tree = &BTRFS_I(inode)->ordered_tree;
343 mutex_lock(&tree->mutex); 348 spin_lock(&tree->lock);
344 ret = __btrfs_remove_ordered_extent(inode, entry); 349 ret = __btrfs_remove_ordered_extent(inode, entry);
345 mutex_unlock(&tree->mutex); 350 spin_unlock(&tree->lock);
346 wake_up(&entry->wait); 351 wake_up(&entry->wait);
347 352
348 return ret; 353 return ret;
@@ -567,7 +572,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
567 struct btrfs_ordered_extent *entry = NULL; 572 struct btrfs_ordered_extent *entry = NULL;
568 573
569 tree = &BTRFS_I(inode)->ordered_tree; 574 tree = &BTRFS_I(inode)->ordered_tree;
570 mutex_lock(&tree->mutex); 575 spin_lock(&tree->lock);
571 node = tree_search(tree, file_offset); 576 node = tree_search(tree, file_offset);
572 if (!node) 577 if (!node)
573 goto out; 578 goto out;
@@ -578,7 +583,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
578 if (entry) 583 if (entry)
579 atomic_inc(&entry->refs); 584 atomic_inc(&entry->refs);
580out: 585out:
581 mutex_unlock(&tree->mutex); 586 spin_unlock(&tree->lock);
582 return entry; 587 return entry;
583} 588}
584 589
@@ -594,7 +599,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
594 struct btrfs_ordered_extent *entry = NULL; 599 struct btrfs_ordered_extent *entry = NULL;
595 600
596 tree = &BTRFS_I(inode)->ordered_tree; 601 tree = &BTRFS_I(inode)->ordered_tree;
597 mutex_lock(&tree->mutex); 602 spin_lock(&tree->lock);
598 node = tree_search(tree, file_offset); 603 node = tree_search(tree, file_offset);
599 if (!node) 604 if (!node)
600 goto out; 605 goto out;
@@ -602,7 +607,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
602 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); 607 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
603 atomic_inc(&entry->refs); 608 atomic_inc(&entry->refs);
604out: 609out:
605 mutex_unlock(&tree->mutex); 610 spin_unlock(&tree->lock);
606 return entry; 611 return entry;
607} 612}
608 613
@@ -629,7 +634,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
629 else 634 else
630 offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); 635 offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
631 636
632 mutex_lock(&tree->mutex); 637 spin_lock(&tree->lock);
633 disk_i_size = BTRFS_I(inode)->disk_i_size; 638 disk_i_size = BTRFS_I(inode)->disk_i_size;
634 639
635 /* truncate file */ 640 /* truncate file */
@@ -735,7 +740,7 @@ out:
735 */ 740 */
736 if (ordered) 741 if (ordered)
737 __btrfs_remove_ordered_extent(inode, ordered); 742 __btrfs_remove_ordered_extent(inode, ordered);
738 mutex_unlock(&tree->mutex); 743 spin_unlock(&tree->lock);
739 if (ordered) 744 if (ordered)
740 wake_up(&ordered->wait); 745 wake_up(&ordered->wait);
741 return ret; 746 return ret;
@@ -762,7 +767,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
762 if (!ordered) 767 if (!ordered)
763 return 1; 768 return 1;
764 769
765 mutex_lock(&tree->mutex); 770 spin_lock(&tree->lock);
766 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { 771 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
767 if (disk_bytenr >= ordered_sum->bytenr) { 772 if (disk_bytenr >= ordered_sum->bytenr) {
768 num_sectors = ordered_sum->len / sectorsize; 773 num_sectors = ordered_sum->len / sectorsize;
@@ -777,7 +782,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
777 } 782 }
778 } 783 }
779out: 784out:
780 mutex_unlock(&tree->mutex); 785 spin_unlock(&tree->lock);
781 btrfs_put_ordered_extent(ordered); 786 btrfs_put_ordered_extent(ordered);
782 return ret; 787 return ret;
783} 788}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 9116c6d0c5a9..c82f76a9f040 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -21,7 +21,7 @@
21 21
22/* one of these per inode */ 22/* one of these per inode */
23struct btrfs_ordered_inode_tree { 23struct btrfs_ordered_inode_tree {
24 struct mutex mutex; 24 spinlock_t lock;
25 struct rb_root tree; 25 struct rb_root tree;
26 struct rb_node *last; 26 struct rb_node *last;
27}; 27};
@@ -128,7 +128,7 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
128static inline void 128static inline void
129btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) 129btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
130{ 130{
131 mutex_init(&t->mutex); 131 spin_lock_init(&t->lock);
132 t->tree = RB_ROOT; 132 t->tree = RB_ROOT;
133 t->last = NULL; 133 t->last = NULL;
134} 134}
@@ -137,7 +137,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
137int btrfs_remove_ordered_extent(struct inode *inode, 137int btrfs_remove_ordered_extent(struct inode *inode,
138 struct btrfs_ordered_extent *entry); 138 struct btrfs_ordered_extent *entry);
139int btrfs_dec_test_ordered_pending(struct inode *inode, 139int btrfs_dec_test_ordered_pending(struct inode *inode,
140 u64 file_offset, u64 io_size); 140 struct btrfs_ordered_extent **cached,
141 u64 file_offset, u64 io_size);
141int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, 142int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
142 u64 start, u64 len, u64 disk_len, int tyep); 143 u64 start, u64 len, u64 disk_len, int tyep);
143int btrfs_add_ordered_sum(struct inode *inode, 144int btrfs_add_ordered_sum(struct inode *inode,
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0109e5606bad..0b23942cbc0d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2659,7 +2659,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
2659 EXTENT_BOUNDARY, GFP_NOFS); 2659 EXTENT_BOUNDARY, GFP_NOFS);
2660 nr++; 2660 nr++;
2661 } 2661 }
2662 btrfs_set_extent_delalloc(inode, page_start, page_end); 2662 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
2663 2663
2664 set_page_dirty(page); 2664 set_page_dirty(page);
2665 dirty_page++; 2665 dirty_page++;
@@ -3487,7 +3487,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3487 key.objectid = objectid; 3487 key.objectid = objectid;
3488 key.type = BTRFS_INODE_ITEM_KEY; 3488 key.type = BTRFS_INODE_ITEM_KEY;
3489 key.offset = 0; 3489 key.offset = 0;
3490 inode = btrfs_iget(root->fs_info->sb, &key, root); 3490 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
3491 BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); 3491 BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
3492 BTRFS_I(inode)->index_cnt = group->key.objectid; 3492 BTRFS_I(inode)->index_cnt = group->key.objectid;
3493 3493
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f8b4521de907..9ac612e6ca60 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -63,10 +63,10 @@ static void btrfs_put_super(struct super_block *sb)
63} 63}
64 64
65enum { 65enum {
66 Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, 66 Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum,
67 Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, 67 Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start,
68 Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, 68 Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool,
69 Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, 69 Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio,
70 Opt_flushoncommit, 70 Opt_flushoncommit,
71 Opt_discard, Opt_err, 71 Opt_discard, Opt_err,
72}; 72};
@@ -74,6 +74,7 @@ enum {
74static match_table_t tokens = { 74static match_table_t tokens = {
75 {Opt_degraded, "degraded"}, 75 {Opt_degraded, "degraded"},
76 {Opt_subvol, "subvol=%s"}, 76 {Opt_subvol, "subvol=%s"},
77 {Opt_subvolid, "subvolid=%d"},
77 {Opt_device, "device=%s"}, 78 {Opt_device, "device=%s"},
78 {Opt_nodatasum, "nodatasum"}, 79 {Opt_nodatasum, "nodatasum"},
79 {Opt_nodatacow, "nodatacow"}, 80 {Opt_nodatacow, "nodatacow"},
@@ -95,31 +96,6 @@ static match_table_t tokens = {
95 {Opt_err, NULL}, 96 {Opt_err, NULL},
96}; 97};
97 98
98u64 btrfs_parse_size(char *str)
99{
100 u64 res;
101 int mult = 1;
102 char *end;
103 char last;
104
105 res = simple_strtoul(str, &end, 10);
106
107 last = end[0];
108 if (isalpha(last)) {
109 last = tolower(last);
110 switch (last) {
111 case 'g':
112 mult *= 1024;
113 case 'm':
114 mult *= 1024;
115 case 'k':
116 mult *= 1024;
117 }
118 res = res * mult;
119 }
120 return res;
121}
122
123/* 99/*
124 * Regular mount options parser. Everything that is needed only when 100 * Regular mount options parser. Everything that is needed only when
125 * reading in a new superblock is parsed here. 101 * reading in a new superblock is parsed here.
@@ -157,6 +133,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
157 btrfs_set_opt(info->mount_opt, DEGRADED); 133 btrfs_set_opt(info->mount_opt, DEGRADED);
158 break; 134 break;
159 case Opt_subvol: 135 case Opt_subvol:
136 case Opt_subvolid:
160 case Opt_device: 137 case Opt_device:
161 /* 138 /*
162 * These are parsed by btrfs_parse_early_options 139 * These are parsed by btrfs_parse_early_options
@@ -214,7 +191,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
214 case Opt_max_extent: 191 case Opt_max_extent:
215 num = match_strdup(&args[0]); 192 num = match_strdup(&args[0]);
216 if (num) { 193 if (num) {
217 info->max_extent = btrfs_parse_size(num); 194 info->max_extent = memparse(num, NULL);
218 kfree(num); 195 kfree(num);
219 196
220 info->max_extent = max_t(u64, 197 info->max_extent = max_t(u64,
@@ -226,7 +203,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
226 case Opt_max_inline: 203 case Opt_max_inline:
227 num = match_strdup(&args[0]); 204 num = match_strdup(&args[0]);
228 if (num) { 205 if (num) {
229 info->max_inline = btrfs_parse_size(num); 206 info->max_inline = memparse(num, NULL);
230 kfree(num); 207 kfree(num);
231 208
232 if (info->max_inline) { 209 if (info->max_inline) {
@@ -241,7 +218,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
241 case Opt_alloc_start: 218 case Opt_alloc_start:
242 num = match_strdup(&args[0]); 219 num = match_strdup(&args[0]);
243 if (num) { 220 if (num) {
244 info->alloc_start = btrfs_parse_size(num); 221 info->alloc_start = memparse(num, NULL);
245 kfree(num); 222 kfree(num);
246 printk(KERN_INFO 223 printk(KERN_INFO
247 "btrfs: allocations start at %llu\n", 224 "btrfs: allocations start at %llu\n",
@@ -292,12 +269,13 @@ out:
292 * only when we need to allocate a new super block. 269 * only when we need to allocate a new super block.
293 */ 270 */
294static int btrfs_parse_early_options(const char *options, fmode_t flags, 271static int btrfs_parse_early_options(const char *options, fmode_t flags,
295 void *holder, char **subvol_name, 272 void *holder, char **subvol_name, u64 *subvol_objectid,
296 struct btrfs_fs_devices **fs_devices) 273 struct btrfs_fs_devices **fs_devices)
297{ 274{
298 substring_t args[MAX_OPT_ARGS]; 275 substring_t args[MAX_OPT_ARGS];
299 char *opts, *p; 276 char *opts, *p;
300 int error = 0; 277 int error = 0;
278 int intarg;
301 279
302 if (!options) 280 if (!options)
303 goto out; 281 goto out;
@@ -320,6 +298,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
320 case Opt_subvol: 298 case Opt_subvol:
321 *subvol_name = match_strdup(&args[0]); 299 *subvol_name = match_strdup(&args[0]);
322 break; 300 break;
301 case Opt_subvolid:
302 intarg = 0;
303 error = match_int(&args[0], &intarg);
304 if (!error) {
305 /* we want the original fs_tree */
306 if (!intarg)
307 *subvol_objectid =
308 BTRFS_FS_TREE_OBJECTID;
309 else
310 *subvol_objectid = intarg;
311 }
312 break;
323 case Opt_device: 313 case Opt_device:
324 error = btrfs_scan_one_device(match_strdup(&args[0]), 314 error = btrfs_scan_one_device(match_strdup(&args[0]),
325 flags, holder, fs_devices); 315 flags, holder, fs_devices);
@@ -347,6 +337,110 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
347 return error; 337 return error;
348} 338}
349 339
340static struct dentry *get_default_root(struct super_block *sb,
341 u64 subvol_objectid)
342{
343 struct btrfs_root *root = sb->s_fs_info;
344 struct btrfs_root *new_root;
345 struct btrfs_dir_item *di;
346 struct btrfs_path *path;
347 struct btrfs_key location;
348 struct inode *inode;
349 struct dentry *dentry;
350 u64 dir_id;
351 int new = 0;
352
353 /*
354 * We have a specific subvol we want to mount, just setup location and
355 * go look up the root.
356 */
357 if (subvol_objectid) {
358 location.objectid = subvol_objectid;
359 location.type = BTRFS_ROOT_ITEM_KEY;
360 location.offset = (u64)-1;
361 goto find_root;
362 }
363
364 path = btrfs_alloc_path();
365 if (!path)
366 return ERR_PTR(-ENOMEM);
367 path->leave_spinning = 1;
368
369 /*
370 * Find the "default" dir item which points to the root item that we
371 * will mount by default if we haven't been given a specific subvolume
372 * to mount.
373 */
374 dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
375 di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
376 if (!di) {
377 /*
378 * Ok the default dir item isn't there. This is weird since
379 * it's always been there, but don't freak out, just try and
380 * mount to root most subvolume.
381 */
382 btrfs_free_path(path);
383 dir_id = BTRFS_FIRST_FREE_OBJECTID;
384 new_root = root->fs_info->fs_root;
385 goto setup_root;
386 }
387
388 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
389 btrfs_free_path(path);
390
391find_root:
392 new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
393 if (IS_ERR(new_root))
394 return ERR_PTR(PTR_ERR(new_root));
395
396 if (btrfs_root_refs(&new_root->root_item) == 0)
397 return ERR_PTR(-ENOENT);
398
399 dir_id = btrfs_root_dirid(&new_root->root_item);
400setup_root:
401 location.objectid = dir_id;
402 location.type = BTRFS_INODE_ITEM_KEY;
403 location.offset = 0;
404
405 inode = btrfs_iget(sb, &location, new_root, &new);
406 if (!inode)
407 return ERR_PTR(-ENOMEM);
408
409 /*
410 * If we're just mounting the root most subvol put the inode and return
411 * a reference to the dentry. We will have already gotten a reference
412 * to the inode in btrfs_fill_super so we're good to go.
413 */
414 if (!new && sb->s_root->d_inode == inode) {
415 iput(inode);
416 return dget(sb->s_root);
417 }
418
419 if (new) {
420 const struct qstr name = { .name = "/", .len = 1 };
421
422 /*
423 * New inode, we need to make the dentry a sibling of s_root so
424 * everything gets cleaned up properly on unmount.
425 */
426 dentry = d_alloc(sb->s_root, &name);
427 if (!dentry) {
428 iput(inode);
429 return ERR_PTR(-ENOMEM);
430 }
431 d_splice_alias(inode, dentry);
432 } else {
433 /*
434 * We found the inode in cache, just find a dentry for it and
435 * put the reference to the inode we just got.
436 */
437 dentry = d_find_alias(inode);
438 iput(inode);
439 }
440
441 return dentry;
442}
443
350static int btrfs_fill_super(struct super_block *sb, 444static int btrfs_fill_super(struct super_block *sb,
351 struct btrfs_fs_devices *fs_devices, 445 struct btrfs_fs_devices *fs_devices,
352 void *data, int silent) 446 void *data, int silent)
@@ -380,7 +474,7 @@ static int btrfs_fill_super(struct super_block *sb,
380 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 474 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
381 key.type = BTRFS_INODE_ITEM_KEY; 475 key.type = BTRFS_INODE_ITEM_KEY;
382 key.offset = 0; 476 key.offset = 0;
383 inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root); 477 inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL);
384 if (IS_ERR(inode)) { 478 if (IS_ERR(inode)) {
385 err = PTR_ERR(inode); 479 err = PTR_ERR(inode);
386 goto fail_close; 480 goto fail_close;
@@ -392,12 +486,6 @@ static int btrfs_fill_super(struct super_block *sb,
392 err = -ENOMEM; 486 err = -ENOMEM;
393 goto fail_close; 487 goto fail_close;
394 } 488 }
395#if 0
396 /* this does the super kobj at the same time */
397 err = btrfs_sysfs_add_super(tree_root->fs_info);
398 if (err)
399 goto fail_close;
400#endif
401 489
402 sb->s_root = root_dentry; 490 sb->s_root = root_dentry;
403 491
@@ -489,19 +577,22 @@ static int btrfs_test_super(struct super_block *s, void *data)
489static int btrfs_get_sb(struct file_system_type *fs_type, int flags, 577static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
490 const char *dev_name, void *data, struct vfsmount *mnt) 578 const char *dev_name, void *data, struct vfsmount *mnt)
491{ 579{
492 char *subvol_name = NULL;
493 struct block_device *bdev = NULL; 580 struct block_device *bdev = NULL;
494 struct super_block *s; 581 struct super_block *s;
495 struct dentry *root; 582 struct dentry *root;
496 struct btrfs_fs_devices *fs_devices = NULL; 583 struct btrfs_fs_devices *fs_devices = NULL;
497 fmode_t mode = FMODE_READ; 584 fmode_t mode = FMODE_READ;
585 char *subvol_name = NULL;
586 u64 subvol_objectid = 0;
498 int error = 0; 587 int error = 0;
588 int found = 0;
499 589
500 if (!(flags & MS_RDONLY)) 590 if (!(flags & MS_RDONLY))
501 mode |= FMODE_WRITE; 591 mode |= FMODE_WRITE;
502 592
503 error = btrfs_parse_early_options(data, mode, fs_type, 593 error = btrfs_parse_early_options(data, mode, fs_type,
504 &subvol_name, &fs_devices); 594 &subvol_name, &subvol_objectid,
595 &fs_devices);
505 if (error) 596 if (error)
506 return error; 597 return error;
507 598
@@ -530,6 +621,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
530 goto error_close_devices; 621 goto error_close_devices;
531 } 622 }
532 623
624 found = 1;
533 btrfs_close_devices(fs_devices); 625 btrfs_close_devices(fs_devices);
534 } else { 626 } else {
535 char b[BDEVNAME_SIZE]; 627 char b[BDEVNAME_SIZE];
@@ -547,25 +639,35 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
547 s->s_flags |= MS_ACTIVE; 639 s->s_flags |= MS_ACTIVE;
548 } 640 }
549 641
550 if (!strcmp(subvol_name, ".")) 642 root = get_default_root(s, subvol_objectid);
551 root = dget(s->s_root); 643 if (IS_ERR(root)) {
552 else { 644 error = PTR_ERR(root);
553 mutex_lock(&s->s_root->d_inode->i_mutex); 645 deactivate_locked_super(s);
554 root = lookup_one_len(subvol_name, s->s_root, 646 goto error;
647 }
648 /* if they gave us a subvolume name bind mount into that */
649 if (strcmp(subvol_name, ".")) {
650 struct dentry *new_root;
651 mutex_lock(&root->d_inode->i_mutex);
652 new_root = lookup_one_len(subvol_name, root,
555 strlen(subvol_name)); 653 strlen(subvol_name));
556 mutex_unlock(&s->s_root->d_inode->i_mutex); 654 mutex_unlock(&root->d_inode->i_mutex);
557 655
558 if (IS_ERR(root)) { 656 if (IS_ERR(new_root)) {
559 deactivate_locked_super(s); 657 deactivate_locked_super(s);
560 error = PTR_ERR(root); 658 error = PTR_ERR(new_root);
561 goto error_free_subvol_name; 659 dput(root);
660 goto error_close_devices;
562 } 661 }
563 if (!root->d_inode) { 662 if (!new_root->d_inode) {
564 dput(root); 663 dput(root);
664 dput(new_root);
565 deactivate_locked_super(s); 665 deactivate_locked_super(s);
566 error = -ENXIO; 666 error = -ENXIO;
567 goto error_free_subvol_name; 667 goto error_close_devices;
568 } 668 }
669 dput(root);
670 root = new_root;
569 } 671 }
570 672
571 mnt->mnt_sb = s; 673 mnt->mnt_sb = s;
@@ -580,6 +682,7 @@ error_close_devices:
580 btrfs_close_devices(fs_devices); 682 btrfs_close_devices(fs_devices);
581error_free_subvol_name: 683error_free_subvol_name:
582 kfree(subvol_name); 684 kfree(subvol_name);
685error:
583 return error; 686 return error;
584} 687}
585 688
@@ -624,14 +727,37 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
624{ 727{
625 struct btrfs_root *root = btrfs_sb(dentry->d_sb); 728 struct btrfs_root *root = btrfs_sb(dentry->d_sb);
626 struct btrfs_super_block *disk_super = &root->fs_info->super_copy; 729 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
730 struct list_head *head = &root->fs_info->space_info;
731 struct btrfs_space_info *found;
732 u64 total_used = 0;
733 u64 data_used = 0;
627 int bits = dentry->d_sb->s_blocksize_bits; 734 int bits = dentry->d_sb->s_blocksize_bits;
628 __be32 *fsid = (__be32 *)root->fs_info->fsid; 735 __be32 *fsid = (__be32 *)root->fs_info->fsid;
629 736
737 rcu_read_lock();
738 list_for_each_entry_rcu(found, head, list) {
739 if (found->flags & (BTRFS_BLOCK_GROUP_DUP|
740 BTRFS_BLOCK_GROUP_RAID10|
741 BTRFS_BLOCK_GROUP_RAID1)) {
742 total_used += found->bytes_used;
743 if (found->flags & BTRFS_BLOCK_GROUP_DATA)
744 data_used += found->bytes_used;
745 else
746 data_used += found->total_bytes;
747 }
748
749 total_used += found->bytes_used;
750 if (found->flags & BTRFS_BLOCK_GROUP_DATA)
751 data_used += found->bytes_used;
752 else
753 data_used += found->total_bytes;
754 }
755 rcu_read_unlock();
756
630 buf->f_namelen = BTRFS_NAME_LEN; 757 buf->f_namelen = BTRFS_NAME_LEN;
631 buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; 758 buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
632 buf->f_bfree = buf->f_blocks - 759 buf->f_bfree = buf->f_blocks - (total_used >> bits);
633 (btrfs_super_bytes_used(disk_super) >> bits); 760 buf->f_bavail = buf->f_blocks - (data_used >> bits);
634 buf->f_bavail = buf->f_bfree;
635 buf->f_bsize = dentry->d_sb->s_blocksize; 761 buf->f_bsize = dentry->d_sb->s_blocksize;
636 buf->f_type = BTRFS_SUPER_MAGIC; 762 buf->f_type = BTRFS_SUPER_MAGIC;
637 763
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2a36e236a492..2d654c1c794d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -997,13 +997,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
997 997
998 mutex_unlock(&root->fs_info->trans_mutex); 998 mutex_unlock(&root->fs_info->trans_mutex);
999 999
1000 if (flush_on_commit) { 1000 if (flush_on_commit || snap_pending) {
1001 btrfs_start_delalloc_inodes(root, 1); 1001 btrfs_start_delalloc_inodes(root, 1);
1002 ret = btrfs_wait_ordered_extents(root, 0, 1); 1002 ret = btrfs_wait_ordered_extents(root, 0, 1);
1003 BUG_ON(ret); 1003 BUG_ON(ret);
1004 } else if (snap_pending) {
1005 ret = btrfs_wait_ordered_extents(root, 0, 1);
1006 BUG_ON(ret);
1007 } 1004 }
1008 1005
1009 /* 1006 /*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 4a9434b622ec..1255fcc8ade5 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -445,7 +445,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
445 key.objectid = objectid; 445 key.objectid = objectid;
446 key.type = BTRFS_INODE_ITEM_KEY; 446 key.type = BTRFS_INODE_ITEM_KEY;
447 key.offset = 0; 447 key.offset = 0;
448 inode = btrfs_iget(root->fs_info->sb, &key, root); 448 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
449 if (IS_ERR(inode)) { 449 if (IS_ERR(inode)) {
450 inode = NULL; 450 inode = NULL;
451 } else if (is_bad_inode(inode)) { 451 } else if (is_bad_inode(inode)) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 41ecbb2347f2..9df8e3f1ccab 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -256,13 +256,13 @@ loop_lock:
256 wake_up(&fs_info->async_submit_wait); 256 wake_up(&fs_info->async_submit_wait);
257 257
258 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 258 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
259 submit_bio(cur->bi_rw, cur);
260 num_run++;
261 batch_run++;
262 259
263 if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) 260 if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
264 num_sync_run++; 261 num_sync_run++;
265 262
263 submit_bio(cur->bi_rw, cur);
264 num_run++;
265 batch_run++;
266 if (need_resched()) { 266 if (need_resched()) {
267 if (num_sync_run) { 267 if (num_sync_run) {
268 blk_run_backing_dev(bdi, NULL); 268 blk_run_backing_dev(bdi, NULL);
@@ -325,16 +325,6 @@ loop_lock:
325 num_sync_run = 0; 325 num_sync_run = 0;
326 blk_run_backing_dev(bdi, NULL); 326 blk_run_backing_dev(bdi, NULL);
327 } 327 }
328
329 cond_resched();
330 if (again)
331 goto loop;
332
333 spin_lock(&device->io_lock);
334 if (device->pending_bios.head || device->pending_sync_bios.head)
335 goto loop_lock;
336 spin_unlock(&device->io_lock);
337
338 /* 328 /*
339 * IO has already been through a long path to get here. Checksumming, 329 * IO has already been through a long path to get here. Checksumming,
340 * async helper threads, perhaps compression. We've done a pretty 330 * async helper threads, perhaps compression. We've done a pretty
@@ -346,6 +336,16 @@ loop_lock:
346 * cared about found its way down here. 336 * cared about found its way down here.
347 */ 337 */
348 blk_run_backing_dev(bdi, NULL); 338 blk_run_backing_dev(bdi, NULL);
339
340 cond_resched();
341 if (again)
342 goto loop;
343
344 spin_lock(&device->io_lock);
345 if (device->pending_bios.head || device->pending_sync_bios.head)
346 goto loop_lock;
347 spin_unlock(&device->io_lock);
348
349done: 349done:
350 return 0; 350 return 0;
351} 351}
@@ -365,6 +365,7 @@ static noinline int device_list_add(const char *path,
365 struct btrfs_device *device; 365 struct btrfs_device *device;
366 struct btrfs_fs_devices *fs_devices; 366 struct btrfs_fs_devices *fs_devices;
367 u64 found_transid = btrfs_super_generation(disk_super); 367 u64 found_transid = btrfs_super_generation(disk_super);
368 char *name;
368 369
369 fs_devices = find_fsid(disk_super->fsid); 370 fs_devices = find_fsid(disk_super->fsid);
370 if (!fs_devices) { 371 if (!fs_devices) {
@@ -411,6 +412,12 @@ static noinline int device_list_add(const char *path,
411 412
412 device->fs_devices = fs_devices; 413 device->fs_devices = fs_devices;
413 fs_devices->num_devices++; 414 fs_devices->num_devices++;
415 } else if (strcmp(device->name, path)) {
416 name = kstrdup(path, GFP_NOFS);
417 if (!name)
418 return -ENOMEM;
419 kfree(device->name);
420 device->name = name;
414 } 421 }
415 422
416 if (found_transid > fs_devices->latest_trans) { 423 if (found_transid > fs_devices->latest_trans) {
@@ -592,7 +599,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
592 goto error_close; 599 goto error_close;
593 600
594 disk_super = (struct btrfs_super_block *)bh->b_data; 601 disk_super = (struct btrfs_super_block *)bh->b_data;
595 devid = le64_to_cpu(disk_super->dev_item.devid); 602 devid = btrfs_stack_device_id(&disk_super->dev_item);
596 if (devid != device->devid) 603 if (devid != device->devid)
597 goto error_brelse; 604 goto error_brelse;
598 605
@@ -694,7 +701,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
694 goto error_close; 701 goto error_close;
695 } 702 }
696 disk_super = (struct btrfs_super_block *)bh->b_data; 703 disk_super = (struct btrfs_super_block *)bh->b_data;
697 devid = le64_to_cpu(disk_super->dev_item.devid); 704 devid = btrfs_stack_device_id(&disk_super->dev_item);
698 transid = btrfs_super_generation(disk_super); 705 transid = btrfs_super_generation(disk_super);
699 if (disk_super->label[0]) 706 if (disk_super->label[0])
700 printk(KERN_INFO "device label %s ", disk_super->label); 707 printk(KERN_INFO "device label %s ", disk_super->label);
@@ -1187,7 +1194,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1187 goto error_close; 1194 goto error_close;
1188 } 1195 }
1189 disk_super = (struct btrfs_super_block *)bh->b_data; 1196 disk_super = (struct btrfs_super_block *)bh->b_data;
1190 devid = le64_to_cpu(disk_super->dev_item.devid); 1197 devid = btrfs_stack_device_id(&disk_super->dev_item);
1191 dev_uuid = disk_super->dev_item.uuid; 1198 dev_uuid = disk_super->dev_item.uuid;
1192 device = btrfs_find_device(root, devid, dev_uuid, 1199 device = btrfs_find_device(root, devid, dev_uuid,
1193 disk_super->fsid); 1200 disk_super->fsid);
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
new file mode 100644
index 000000000000..04b8280582a9
--- /dev/null
+++ b/fs/ceph/Kconfig
@@ -0,0 +1,27 @@
1config CEPH_FS
2 tristate "Ceph distributed file system (EXPERIMENTAL)"
3 depends on INET && EXPERIMENTAL
4 select LIBCRC32C
5 select CRYPTO_AES
6 help
7 Choose Y or M here to include support for mounting the
8 experimental Ceph distributed file system. Ceph is an extremely
9 scalable file system designed to provide high performance,
10 reliable access to petabytes of storage.
11
12 More information at http://ceph.newdream.net/.
13
14 If unsure, say N.
15
16config CEPH_FS_PRETTYDEBUG
17 bool "Include file:line in ceph debug output"
18 depends on CEPH_FS
19 default n
20 help
21 If you say Y here, debug output will include a filename and
22 line to aid debugging. This increases kernel size and slows
23 execution slightly when debug call sites are enabled (e.g.,
24 via CONFIG_DYNAMIC_DEBUG).
25
26 If unsure, say N.
27
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
new file mode 100644
index 000000000000..6a660e610be8
--- /dev/null
+++ b/fs/ceph/Makefile
@@ -0,0 +1,39 @@
1#
2# Makefile for CEPH filesystem.
3#
4
5ifneq ($(KERNELRELEASE),)
6
7obj-$(CONFIG_CEPH_FS) += ceph.o
8
9ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \
10 export.o caps.o snap.o xattr.o \
11 messenger.o msgpool.o buffer.o pagelist.o \
12 mds_client.o mdsmap.o \
13 mon_client.o \
14 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
15 debugfs.o \
16 auth.o auth_none.o \
17 crypto.o armor.o \
18 auth_x.o \
19 ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
20
21else
22#Otherwise we were called directly from the command
23# line; invoke the kernel build system.
24
25KERNELDIR ?= /lib/modules/$(shell uname -r)/build
26PWD := $(shell pwd)
27
28default: all
29
30all:
31 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules
32
33modules_install:
34 $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install
35
36clean:
37 $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
38
39endif
diff --git a/fs/ceph/README b/fs/ceph/README
new file mode 100644
index 000000000000..18352fab37c0
--- /dev/null
+++ b/fs/ceph/README
@@ -0,0 +1,20 @@
1#
2# The following files are shared by (and manually synchronized
3# between) the Ceph userland and kernel client.
4#
5# userland kernel
6src/include/ceph_fs.h fs/ceph/ceph_fs.h
7src/include/ceph_fs.cc fs/ceph/ceph_fs.c
8src/include/msgr.h fs/ceph/msgr.h
9src/include/rados.h fs/ceph/rados.h
10src/include/ceph_strings.cc fs/ceph/ceph_strings.c
11src/include/ceph_frag.h fs/ceph/ceph_frag.h
12src/include/ceph_frag.cc fs/ceph/ceph_frag.c
13src/include/ceph_hash.h fs/ceph/ceph_hash.h
14src/include/ceph_hash.cc fs/ceph/ceph_hash.c
15src/crush/crush.c fs/ceph/crush/crush.c
16src/crush/crush.h fs/ceph/crush/crush.h
17src/crush/mapper.c fs/ceph/crush/mapper.c
18src/crush/mapper.h fs/ceph/crush/mapper.h
19src/crush/hash.h fs/ceph/crush/hash.h
20src/crush/hash.c fs/ceph/crush/hash.c
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
new file mode 100644
index 000000000000..23bb0ceabe31
--- /dev/null
+++ b/fs/ceph/addr.c
@@ -0,0 +1,1188 @@
1#include "ceph_debug.h"
2
3#include <linux/backing-dev.h>
4#include <linux/fs.h>
5#include <linux/mm.h>
6#include <linux/pagemap.h>
7#include <linux/writeback.h> /* generic_writepages */
8#include <linux/pagevec.h>
9#include <linux/task_io_accounting_ops.h>
10
11#include "super.h"
12#include "osd_client.h"
13
14/*
15 * Ceph address space ops.
16 *
17 * There are a few funny things going on here.
18 *
19 * The page->private field is used to reference a struct
20 * ceph_snap_context for _every_ dirty page. This indicates which
21 * snapshot the page was logically dirtied in, and thus which snap
22 * context needs to be associated with the osd write during writeback.
23 *
24 * Similarly, struct ceph_inode_info maintains a set of counters to
25 * count dirty pages on the inode. In the absence of snapshots,
26 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
27 *
28 * When a snapshot is taken (that is, when the client receives
29 * notification that a snapshot was taken), each inode with caps and
30 * with dirty pages (dirty pages implies there is a cap) gets a new
31 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
32 * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
33 * moved to capsnap->dirty. (Unless a sync write is currently in
34 * progress. In that case, the capsnap is said to be "pending", new
35 * writes cannot start, and the capsnap isn't "finalized" until the
36 * write completes (or fails) and a final size/mtime for the inode for
37 * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
38 *
39 * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
40 * we look for the first capsnap in i_cap_snaps and write out pages in
41 * that snap context _only_. Then we move on to the next capsnap,
42 * eventually reaching the "live" or "head" context (i.e., pages that
43 * are not yet snapped) and are writing the most recently dirtied
44 * pages.
45 *
46 * Invalidate and so forth must take care to ensure the dirty page
47 * accounting is preserved.
48 */
49
50#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
51#define CONGESTION_OFF_THRESH(congestion_kb) \
52 (CONGESTION_ON_THRESH(congestion_kb) - \
53 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
54
55
56
57/*
58 * Dirty a page. Optimistically adjust accounting, on the assumption
59 * that we won't race with invalidate. If we do, readjust.
60 */
61static int ceph_set_page_dirty(struct page *page)
62{
63 struct address_space *mapping = page->mapping;
64 struct inode *inode;
65 struct ceph_inode_info *ci;
66 int undo = 0;
67 struct ceph_snap_context *snapc;
68
69 if (unlikely(!mapping))
70 return !TestSetPageDirty(page);
71
72 if (TestSetPageDirty(page)) {
73 dout("%p set_page_dirty %p idx %lu -- already dirty\n",
74 mapping->host, page, page->index);
75 return 0;
76 }
77
78 inode = mapping->host;
79 ci = ceph_inode(inode);
80
81 /*
82 * Note that we're grabbing a snapc ref here without holding
83 * any locks!
84 */
85 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
86
87 /* dirty the head */
88 spin_lock(&inode->i_lock);
89 if (ci->i_wrbuffer_ref_head == 0)
90 ci->i_head_snapc = ceph_get_snap_context(snapc);
91 ++ci->i_wrbuffer_ref_head;
92 if (ci->i_wrbuffer_ref == 0)
93 igrab(inode);
94 ++ci->i_wrbuffer_ref;
95 dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
96 "snapc %p seq %lld (%d snaps)\n",
97 mapping->host, page, page->index,
98 ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
99 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
100 snapc, snapc->seq, snapc->num_snaps);
101 spin_unlock(&inode->i_lock);
102
103 /* now adjust page */
104 spin_lock_irq(&mapping->tree_lock);
105 if (page->mapping) { /* Race with truncate? */
106 WARN_ON_ONCE(!PageUptodate(page));
107
108 if (mapping_cap_account_dirty(mapping)) {
109 __inc_zone_page_state(page, NR_FILE_DIRTY);
110 __inc_bdi_stat(mapping->backing_dev_info,
111 BDI_RECLAIMABLE);
112 task_io_account_write(PAGE_CACHE_SIZE);
113 }
114 radix_tree_tag_set(&mapping->page_tree,
115 page_index(page), PAGECACHE_TAG_DIRTY);
116
117 /*
118 * Reference snap context in page->private. Also set
119 * PagePrivate so that we get invalidatepage callback.
120 */
121 page->private = (unsigned long)snapc;
122 SetPagePrivate(page);
123 } else {
124 dout("ANON set_page_dirty %p (raced truncate?)\n", page);
125 undo = 1;
126 }
127
128 spin_unlock_irq(&mapping->tree_lock);
129
130 if (undo)
131 /* whoops, we failed to dirty the page */
132 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
133
134 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
135
136 BUG_ON(!PageDirty(page));
137 return 1;
138}
139
140/*
141 * If we are truncating the full page (i.e. offset == 0), adjust the
142 * dirty page counters appropriately. Only called if there is private
143 * data on the page.
144 */
145static void ceph_invalidatepage(struct page *page, unsigned long offset)
146{
147 struct inode *inode;
148 struct ceph_inode_info *ci;
149 struct ceph_snap_context *snapc = (void *)page->private;
150
151 BUG_ON(!PageLocked(page));
152 BUG_ON(!page->private);
153 BUG_ON(!PagePrivate(page));
154 BUG_ON(!page->mapping);
155
156 inode = page->mapping->host;
157
158 /*
159 * We can get non-dirty pages here due to races between
160 * set_page_dirty and truncate_complete_page; just spit out a
161 * warning, in case we end up with accounting problems later.
162 */
163 if (!PageDirty(page))
164 pr_err("%p invalidatepage %p page not dirty\n", inode, page);
165
166 if (offset == 0)
167 ClearPageChecked(page);
168
169 ci = ceph_inode(inode);
170 if (offset == 0) {
171 dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
172 inode, page, page->index, offset);
173 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
174 ceph_put_snap_context(snapc);
175 page->private = 0;
176 ClearPagePrivate(page);
177 } else {
178 dout("%p invalidatepage %p idx %lu partial dirty page\n",
179 inode, page, page->index);
180 }
181}
182
183/* just a sanity check */
184static int ceph_releasepage(struct page *page, gfp_t g)
185{
186 struct inode *inode = page->mapping ? page->mapping->host : NULL;
187 dout("%p releasepage %p idx %lu\n", inode, page, page->index);
188 WARN_ON(PageDirty(page));
189 WARN_ON(page->private);
190 WARN_ON(PagePrivate(page));
191 return 0;
192}
193
194/*
195 * read a single page, without unlocking it.
196 */
197static int readpage_nounlock(struct file *filp, struct page *page)
198{
199 struct inode *inode = filp->f_dentry->d_inode;
200 struct ceph_inode_info *ci = ceph_inode(inode);
201 struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
202 int err = 0;
203 u64 len = PAGE_CACHE_SIZE;
204
205 dout("readpage inode %p file %p page %p index %lu\n",
206 inode, filp, page, page->index);
207 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
208 page->index << PAGE_CACHE_SHIFT, &len,
209 ci->i_truncate_seq, ci->i_truncate_size,
210 &page, 1);
211 if (err == -ENOENT)
212 err = 0;
213 if (err < 0) {
214 SetPageError(page);
215 goto out;
216 } else if (err < PAGE_CACHE_SIZE) {
217 /* zero fill remainder of page */
218 zero_user_segment(page, err, PAGE_CACHE_SIZE);
219 }
220 SetPageUptodate(page);
221
222out:
223 return err < 0 ? err : 0;
224}
225
226static int ceph_readpage(struct file *filp, struct page *page)
227{
228 int r = readpage_nounlock(filp, page);
229 unlock_page(page);
230 return r;
231}
232
233/*
234 * Build a vector of contiguous pages from the provided page list.
235 */
236static struct page **page_vector_from_list(struct list_head *page_list,
237 unsigned *nr_pages)
238{
239 struct page **pages;
240 struct page *page;
241 int next_index, contig_pages = 0;
242
243 /* build page vector */
244 pages = kmalloc(sizeof(*pages) * *nr_pages, GFP_NOFS);
245 if (!pages)
246 return ERR_PTR(-ENOMEM);
247
248 BUG_ON(list_empty(page_list));
249 next_index = list_entry(page_list->prev, struct page, lru)->index;
250 list_for_each_entry_reverse(page, page_list, lru) {
251 if (page->index == next_index) {
252 dout("readpages page %d %p\n", contig_pages, page);
253 pages[contig_pages] = page;
254 contig_pages++;
255 next_index++;
256 } else {
257 break;
258 }
259 }
260 *nr_pages = contig_pages;
261 return pages;
262}
263
264/*
265 * Read multiple pages. Leave pages we don't read + unlock in page_list;
266 * the caller (VM) cleans them up.
267 */
268static int ceph_readpages(struct file *file, struct address_space *mapping,
269 struct list_head *page_list, unsigned nr_pages)
270{
271 struct inode *inode = file->f_dentry->d_inode;
272 struct ceph_inode_info *ci = ceph_inode(inode);
273 struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
274 int rc = 0;
275 struct page **pages;
276 struct pagevec pvec;
277 loff_t offset;
278 u64 len;
279
280 dout("readpages %p file %p nr_pages %d\n",
281 inode, file, nr_pages);
282
283 pages = page_vector_from_list(page_list, &nr_pages);
284 if (IS_ERR(pages))
285 return PTR_ERR(pages);
286
287 /* guess read extent */
288 offset = pages[0]->index << PAGE_CACHE_SHIFT;
289 len = nr_pages << PAGE_CACHE_SHIFT;
290 rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
291 offset, &len,
292 ci->i_truncate_seq, ci->i_truncate_size,
293 pages, nr_pages);
294 if (rc == -ENOENT)
295 rc = 0;
296 if (rc < 0)
297 goto out;
298
299 /* set uptodate and add to lru in pagevec-sized chunks */
300 pagevec_init(&pvec, 0);
301 for (; !list_empty(page_list) && len > 0;
302 rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
303 struct page *page =
304 list_entry(page_list->prev, struct page, lru);
305
306 list_del(&page->lru);
307
308 if (rc < (int)PAGE_CACHE_SIZE) {
309 /* zero (remainder of) page */
310 int s = rc < 0 ? 0 : rc;
311 zero_user_segment(page, s, PAGE_CACHE_SIZE);
312 }
313
314 if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
315 page_cache_release(page);
316 dout("readpages %p add_to_page_cache failed %p\n",
317 inode, page);
318 continue;
319 }
320 dout("readpages %p adding %p idx %lu\n", inode, page,
321 page->index);
322 flush_dcache_page(page);
323 SetPageUptodate(page);
324 unlock_page(page);
325 if (pagevec_add(&pvec, page) == 0)
326 pagevec_lru_add_file(&pvec); /* add to lru */
327 }
328 pagevec_lru_add_file(&pvec);
329 rc = 0;
330
331out:
332 kfree(pages);
333 return rc;
334}
335
/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 *
 * Returns a referenced snap context (caller must ceph_put_snap_context it),
 * or NULL if the inode has neither dirty cap snaps nor a snap realm.
 * If @snap_size is non-NULL it receives the capsnap's size when the
 * oldest dirty data belongs to a cap snap (otherwise it is left untouched).
 *
 * Caller holds i_lock.
 */
static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
						      u64 *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	/* cap snaps are ordered oldest-first; take the first with dirty pages */
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	/* no dirty cap snap: fall back to the realm's current ("head") context */
	if (!snapc && ci->i_snap_realm) {
		snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	return snapc;
}
366
367static struct ceph_snap_context *get_oldest_context(struct inode *inode,
368 u64 *snap_size)
369{
370 struct ceph_snap_context *snapc = NULL;
371
372 spin_lock(&inode->i_lock);
373 snapc = __get_oldest_context(inode, snap_size);
374 spin_unlock(&inode->i_lock);
375 return snapc;
376}
377
378/*
379 * Write a single page, but leave the page locked.
380 *
381 * If we get a write error, set the page error bit, but still adjust the
382 * dirty page accounting (i.e., page is no longer dirty).
383 */
384static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
385{
386 struct inode *inode;
387 struct ceph_inode_info *ci;
388 struct ceph_client *client;
389 struct ceph_osd_client *osdc;
390 loff_t page_off = page->index << PAGE_CACHE_SHIFT;
391 int len = PAGE_CACHE_SIZE;
392 loff_t i_size;
393 int err = 0;
394 struct ceph_snap_context *snapc;
395 u64 snap_size = 0;
396 long writeback_stat;
397
398 dout("writepage %p idx %lu\n", page, page->index);
399
400 if (!page->mapping || !page->mapping->host) {
401 dout("writepage %p - no mapping\n", page);
402 return -EFAULT;
403 }
404 inode = page->mapping->host;
405 ci = ceph_inode(inode);
406 client = ceph_inode_to_client(inode);
407 osdc = &client->osdc;
408
409 /* verify this is a writeable snap context */
410 snapc = (void *)page->private;
411 if (snapc == NULL) {
412 dout("writepage %p page %p not dirty?\n", inode, page);
413 goto out;
414 }
415 if (snapc != get_oldest_context(inode, &snap_size)) {
416 dout("writepage %p page %p snapc %p not writeable - noop\n",
417 inode, page, (void *)page->private);
418 /* we should only noop if called by kswapd */
419 WARN_ON((current->flags & PF_MEMALLOC) == 0);
420 goto out;
421 }
422
423 /* is this a partial page at end of file? */
424 if (snap_size)
425 i_size = snap_size;
426 else
427 i_size = i_size_read(inode);
428 if (i_size < page_off + len)
429 len = i_size - page_off;
430
431 dout("writepage %p page %p index %lu on %llu~%u\n",
432 inode, page, page->index, page_off, len);
433
434 writeback_stat = atomic_long_inc_return(&client->writeback_count);
435 if (writeback_stat >
436 CONGESTION_ON_THRESH(client->mount_args->congestion_kb))
437 set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
438
439 set_page_writeback(page);
440 err = ceph_osdc_writepages(osdc, ceph_vino(inode),
441 &ci->i_layout, snapc,
442 page_off, len,
443 ci->i_truncate_seq, ci->i_truncate_size,
444 &inode->i_mtime,
445 &page, 1, 0, 0, true);
446 if (err < 0) {
447 dout("writepage setting page/mapping error %d %p\n", err, page);
448 SetPageError(page);
449 mapping_set_error(&inode->i_data, err);
450 if (wbc)
451 wbc->pages_skipped++;
452 } else {
453 dout("writepage cleaned page %p\n", page);
454 err = 0; /* vfs expects us to return 0 */
455 }
456 page->private = 0;
457 ClearPagePrivate(page);
458 end_page_writeback(page);
459 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
460 ceph_put_snap_context(snapc);
461out:
462 return err;
463}
464
465static int ceph_writepage(struct page *page, struct writeback_control *wbc)
466{
467 int err;
468 struct inode *inode = page->mapping->host;
469 BUG_ON(!inode);
470 igrab(inode);
471 err = writepage_nounlock(page, wbc);
472 unlock_page(page);
473 iput(inode);
474 return err;
475}
476
477
478/*
479 * lame release_pages helper. release_pages() isn't exported to
480 * modules.
481 */
482static void ceph_release_pages(struct page **pages, int num)
483{
484 struct pagevec pvec;
485 int i;
486
487 pagevec_init(&pvec, 0);
488 for (i = 0; i < num; i++) {
489 if (pagevec_add(&pvec, pages[i]) == 0)
490 pagevec_release(&pvec);
491 }
492 pagevec_release(&pvec);
493}
494
495
/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 *
 * Cleans every page attached to @req: drops its snapc ref and cap ref,
 * ends writeback, and releases the page vector and the request itself.
 */
static void writepages_finish(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned wrote;
	struct page *page;
	int i;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	/* NOTE(review): wbc points at the caller's on-stack struct; assumes
	 * the submitter is still waiting when this runs -- confirm */
	struct writeback_control *wbc = req->r_wbc;
	__s32 rc = -EIO;
	u64 bytes = 0;
	struct ceph_client *client = ceph_inode_to_client(inode);
	long writeback_stat;
	unsigned issued = __ceph_caps_issued(ci, NULL);

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);

	if (rc >= 0) {
		/*
		 * Assume we wrote the pages we originally sent. The
		 * osd might reply with fewer pages if our writeback
		 * raced with a truncation and was adjusted at the osd,
		 * so don't believe the reply.
		 */
		wrote = req->r_num_pages;
	} else {
		wrote = 0;
		mapping_set_error(mapping, rc);
	}
	dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
	     inode, rc, bytes, wrote);

	/* clean all pages */
	for (i = 0; i < req->r_num_pages; i++) {
		page = req->r_pages[i];
		BUG_ON(!page);
		WARN_ON(!PageUptodate(page));

		/* un-congest the bdi once we drop back below the threshold */
		writeback_stat =
			atomic_long_dec_return(&client->writeback_count);
		if (writeback_stat <
		    CONGESTION_OFF_THRESH(client->mount_args->congestion_kb))
			clear_bdi_congested(&client->backing_dev_info,
					    BLK_RW_ASYNC);

		/* pages beyond 'wrote' were not written (error case) */
		if (i >= wrote) {
			dout("inode %p skipping page %p\n", inode, page);
			wbc->pages_skipped++;
		}
		/* drop the per-page snapc ref taken when the page was dirtied */
		page->private = 0;
		ClearPagePrivate(page);
		ceph_put_snap_context(snapc);
		dout("unlocking %d %p\n", i, page);
		end_page_writeback(page);

		/*
		 * We lost the cache cap, need to truncate the page before
		 * it is unlocked, otherwise we'd truncate it later in the
		 * page truncation thread, possibly losing some data that
		 * raced its way in
		 */
		if ((issued & CEPH_CAP_FILE_CACHE) == 0)
			generic_error_remove_page(inode->i_mapping, page);

		unlock_page(page);
	}
	dout("%p wrote+cleaned %d pages\n", inode, wrote);
	ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);

	ceph_release_pages(req->r_pages, req->r_num_pages);
	/* free the page vector back to wherever alloc_page_vec() got it */
	if (req->r_pages_from_pool)
		mempool_free(req->r_pages,
			     ceph_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(req->r_pages);
	ceph_osdc_put_request(req);
}
588
589/*
590 * allocate a page vec, either directly, or if necessary, via a the
591 * mempool. we avoid the mempool if we can because req->r_num_pages
592 * may be less than the maximum write size.
593 */
594static void alloc_page_vec(struct ceph_client *client,
595 struct ceph_osd_request *req)
596{
597 req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
598 GFP_NOFS);
599 if (!req->r_pages) {
600 req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
601 req->r_pages_from_pool = 1;
602 WARN_ON(!req->r_pages);
603 }
604}
605
/*
 * initiate async writeback
 *
 * Scan the mapping for runs of dirty pages tagged with the oldest
 * writeable snap context, lock them, and submit them as a single async
 * OSD write per run (completion handled by writepages_finish).
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client;
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned wsize = 1 << inode->i_blkbits;
	struct ceph_osd_request *req = NULL;
	int do_sync;
	u64 snap_size = 0;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	do_sync = wbc->sync_mode == WB_SYNC_ALL;
	if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	client = ceph_inode_to_client(inode);
	if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
		pr_warning("writepage_start %p on forced umount\n", inode);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	/* clamp the write size: mount option, but at least one page */
	if (client->mount_args->wsize && client->mount_args->wsize < wsize)
		wsize = client->mount_args->wsize;
	if (wsize < PAGE_CACHE_SIZE)
		wsize = PAGE_CACHE_SIZE;
	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);

	/* ?? */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		dout(" writepages congested\n");
		wbc->encountered_congestion = 1;
		goto out_final;
	}

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);
	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t next;
		int pvec_pages, locked_pages;
		struct page *page;
		int want;
		u64 offset, len;
		struct ceph_osd_request_head *reqhead;
		struct ceph_osd_op *op;
		long writeback_stat;

		next = 0;
		locked_pages = 0;
		max_pages = max_pages_ever;

get_more_pages:
		/* gather up to max_pages consecutive dirty pages with the
		 * right snap context; bail out of the inner loop on any
		 * page that breaks the run */
		first = -1;
		want = min(end - index,
			   min((pgoff_t)PAGEVEC_SIZE,
			       max_pages - (pgoff_t)locked_pages) - 1)
			+ 1;
		pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
						PAGECACHE_TAG_DIRTY,
						want);
		dout("pagevec_lookup_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (!wbc->range_cyclic && page->index > end) {
				dout("end of range %p\n", page);
				done = 1;
				unlock_page(page);
				break;
			}
			if (next && (page->index != next)) {
				dout("not consecutive %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if ((snap_size && page_offset(page) > snap_size) ||
			    (!snap_size &&
			     page_offset(page) > i_size_read(inode))) {
				dout("%p page eof %llu\n", page, snap_size ?
				     snap_size : i_size_read(inode));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			if (snapc != (void *)page->private) {
				dout("page snapc %p != oldest %p\n",
				     (void *)page->private, snapc);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/* ok */
			if (locked_pages == 0) {
				/* prepare async write request */
				offset = page->index << PAGE_CACHE_SHIFT;
				len = wsize;
				req = ceph_osdc_new_request(&client->osdc,
					    &ci->i_layout,
					    ceph_vino(inode),
					    offset, &len,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
					    snapc, do_sync,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    &inode->i_mtime, true, 1);
				/* the osdc may clip len to a stripe boundary */
				max_pages = req->r_num_pages;

				alloc_page_vec(client, req);
				req->r_callback = writepages_finish;
				req->r_inode = inode;
				req->r_wbc = wbc;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			writeback_stat = atomic_long_inc_return(&client->writeback_count);
			if (writeback_stat > CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) {
				set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
			}

			set_page_writeback(page);
			req->r_pages[locked_pages] = page;
			locked_pages++;
			next = page->index + 1;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n",
				     pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

		/* submit the write */
		offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
		/* don't write past EOF (or the capsnap's size) */
		len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
			  (u64)locked_pages << PAGE_CACHE_SHIFT);
		dout("writepages got %d pages at %llu~%llu\n",
		     locked_pages, offset, len);

		/* revise final length, page count */
		req->r_num_pages = locked_pages;
		reqhead = req->r_request->front.iov_base;
		op = (void *)(reqhead + 1);
		op->extent.length = cpu_to_le64(len);
		op->payload_len = cpu_to_le32(len);
		req->r_request->hdr.data_len = cpu_to_le32(len);

		ceph_osdc_start_request(&client->osdc, req, true);
		req = NULL;	/* ownership passed to the osd client */

		/* continue? */
		index = next;
		wbc->nr_to_write -= locked_pages;
		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		/* re-check for an older snapc that may have become writeable */
		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	if (req)
		ceph_osdc_put_request(req);
	if (rc > 0)
		rc = 0;  /* vfs expects us to return 0 */
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
out_final:
	return rc;
}
906
907
908
909/*
910 * See if a given @snapc is either writeable, or already written.
911 */
912static int context_is_writeable_or_written(struct inode *inode,
913 struct ceph_snap_context *snapc)
914{
915 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
916 return !oldest || snapc->seq <= oldest->seq;
917}
918
919/*
920 * We are only allowed to write into/dirty the page if the page is
921 * clean, or already dirty within the same snap context.
922 */
923static int ceph_update_writeable_page(struct file *file,
924 loff_t pos, unsigned len,
925 struct page *page)
926{
927 struct inode *inode = file->f_dentry->d_inode;
928 struct ceph_inode_info *ci = ceph_inode(inode);
929 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
930 loff_t page_off = pos & PAGE_CACHE_MASK;
931 int pos_in_page = pos & ~PAGE_CACHE_MASK;
932 int end_in_page = pos_in_page + len;
933 loff_t i_size;
934 struct ceph_snap_context *snapc;
935 int r;
936
937retry_locked:
938 /* writepages currently holds page lock, but if we change that later, */
939 wait_on_page_writeback(page);
940
941 /* check snap context */
942 BUG_ON(!ci->i_snap_realm);
943 down_read(&mdsc->snap_rwsem);
944 BUG_ON(!ci->i_snap_realm->cached_context);
945 if (page->private &&
946 (void *)page->private != ci->i_snap_realm->cached_context) {
947 /*
948 * this page is already dirty in another (older) snap
949 * context! is it writeable now?
950 */
951 snapc = get_oldest_context(inode, NULL);
952 up_read(&mdsc->snap_rwsem);
953
954 if (snapc != (void *)page->private) {
955 dout(" page %p snapc %p not current or oldest\n",
956 page, (void *)page->private);
957 /*
958 * queue for writeback, and wait for snapc to
959 * be writeable or written
960 */
961 snapc = ceph_get_snap_context((void *)page->private);
962 unlock_page(page);
963 ceph_queue_writeback(inode);
964 wait_event_interruptible(ci->i_cap_wq,
965 context_is_writeable_or_written(inode, snapc));
966 ceph_put_snap_context(snapc);
967 return -EAGAIN;
968 }
969
970 /* yay, writeable, do it now (without dropping page lock) */
971 dout(" page %p snapc %p not current, but oldest\n",
972 page, snapc);
973 if (!clear_page_dirty_for_io(page))
974 goto retry_locked;
975 r = writepage_nounlock(page, NULL);
976 if (r < 0)
977 goto fail_nosnap;
978 goto retry_locked;
979 }
980
981 if (PageUptodate(page)) {
982 dout(" page %p already uptodate\n", page);
983 return 0;
984 }
985
986 /* full page? */
987 if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
988 return 0;
989
990 /* past end of file? */
991 i_size = inode->i_size; /* caller holds i_mutex */
992
993 if (i_size + len > inode->i_sb->s_maxbytes) {
994 /* file is too big */
995 r = -EINVAL;
996 goto fail;
997 }
998
999 if (page_off >= i_size ||
1000 (pos_in_page == 0 && (pos+len) >= i_size &&
1001 end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
1002 dout(" zeroing %p 0 - %d and %d - %d\n",
1003 page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
1004 zero_user_segments(page,
1005 0, pos_in_page,
1006 end_in_page, PAGE_CACHE_SIZE);
1007 return 0;
1008 }
1009
1010 /* we need to read it. */
1011 up_read(&mdsc->snap_rwsem);
1012 r = readpage_nounlock(file, page);
1013 if (r < 0)
1014 goto fail_nosnap;
1015 goto retry_locked;
1016
1017fail:
1018 up_read(&mdsc->snap_rwsem);
1019fail_nosnap:
1020 unlock_page(page);
1021 return r;
1022}
1023
1024/*
1025 * We are only allowed to write into/dirty the page if the page is
1026 * clean, or already dirty within the same snap context.
1027 */
1028static int ceph_write_begin(struct file *file, struct address_space *mapping,
1029 loff_t pos, unsigned len, unsigned flags,
1030 struct page **pagep, void **fsdata)
1031{
1032 struct inode *inode = file->f_dentry->d_inode;
1033 struct page *page;
1034 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1035 int r;
1036
1037 do {
1038 /* get a page*/
1039 page = grab_cache_page_write_begin(mapping, index, 0);
1040 if (!page)
1041 return -ENOMEM;
1042 *pagep = page;
1043
1044 dout("write_begin file %p inode %p page %p %d~%d\n", file,
1045 inode, page, (int)pos, (int)len);
1046
1047 r = ceph_update_writeable_page(file, pos, len, page);
1048 } while (r == -EAGAIN);
1049
1050 return r;
1051}
1052
1053/*
1054 * we don't do anything in here that simple_write_end doesn't do
1055 * except adjust dirty page accounting and drop read lock on
1056 * mdsc->snap_rwsem.
1057 */
1058static int ceph_write_end(struct file *file, struct address_space *mapping,
1059 loff_t pos, unsigned len, unsigned copied,
1060 struct page *page, void *fsdata)
1061{
1062 struct inode *inode = file->f_dentry->d_inode;
1063 struct ceph_client *client = ceph_inode_to_client(inode);
1064 struct ceph_mds_client *mdsc = &client->mdsc;
1065 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1066 int check_cap = 0;
1067
1068 dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
1069 inode, page, (int)pos, (int)copied, (int)len);
1070
1071 /* zero the stale part of the page if we did a short copy */
1072 if (copied < len)
1073 zero_user_segment(page, from+copied, len);
1074
1075 /* did file size increase? */
1076 /* (no need for i_size_read(); we caller holds i_mutex */
1077 if (pos+copied > inode->i_size)
1078 check_cap = ceph_inode_set_size(inode, pos+copied);
1079
1080 if (!PageUptodate(page))
1081 SetPageUptodate(page);
1082
1083 set_page_dirty(page);
1084
1085 unlock_page(page);
1086 up_read(&mdsc->snap_rwsem);
1087 page_cache_release(page);
1088
1089 if (check_cap)
1090 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
1091
1092 return copied;
1093}
1094
/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 *
 * Warn loudly if it ever is, and fail the request.
 */
static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
			      const struct iovec *iov,
			      loff_t pos, unsigned long nr_segs)
{
	WARN_ON(1);
	return -EINVAL;
}
1107
/* address_space operations for ceph regular-file mappings */
const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,	/* never called; see ceph_direct_io */
};
1120
1121
1122/*
1123 * vm ops
1124 */
1125
1126/*
1127 * Reuse write_begin here for simplicity.
1128 */
1129static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1130{
1131 struct inode *inode = vma->vm_file->f_dentry->d_inode;
1132 struct page *page = vmf->page;
1133 struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
1134 loff_t off = page->index << PAGE_CACHE_SHIFT;
1135 loff_t size, len;
1136 int ret;
1137
1138 size = i_size_read(inode);
1139 if (off + PAGE_CACHE_SIZE <= size)
1140 len = PAGE_CACHE_SIZE;
1141 else
1142 len = size & ~PAGE_CACHE_MASK;
1143
1144 dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
1145 off, len, page, page->index);
1146
1147 lock_page(page);
1148
1149 ret = VM_FAULT_NOPAGE;
1150 if ((off > size) ||
1151 (page->mapping != inode->i_mapping))
1152 goto out;
1153
1154 ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
1155 if (ret == 0) {
1156 /* success. we'll keep the page locked. */
1157 set_page_dirty(page);
1158 up_read(&mdsc->snap_rwsem);
1159 ret = VM_FAULT_LOCKED;
1160 } else {
1161 if (ret == -ENOMEM)
1162 ret = VM_FAULT_OOM;
1163 else
1164 ret = VM_FAULT_SIGBUS;
1165 }
1166out:
1167 dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
1168 if (ret != VM_FAULT_LOCKED)
1169 unlock_page(page);
1170 return ret;
1171}
1172
1173static struct vm_operations_struct ceph_vmops = {
1174 .fault = filemap_fault,
1175 .page_mkwrite = ceph_page_mkwrite,
1176};
1177
1178int ceph_mmap(struct file *file, struct vm_area_struct *vma)
1179{
1180 struct address_space *mapping = file->f_mapping;
1181
1182 if (!mapping->a_ops->readpage)
1183 return -ENOEXEC;
1184 file_accessed(file);
1185 vma->vm_ops = &ceph_vmops;
1186 vma->vm_flags |= VM_CAN_NONLINEAR;
1187 return 0;
1188}
diff --git a/fs/ceph/armor.c b/fs/ceph/armor.c
new file mode 100644
index 000000000000..67b2c030924b
--- /dev/null
+++ b/fs/ceph/armor.c
@@ -0,0 +1,99 @@
1
2#include <linux/errno.h>
3
4/*
5 * base64 encode/decode.
6 */
7
/* the 64-character base64 alphabet, indexed by 6-bit value */
const char *pem_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
9
/* map a 6-bit value (0..63) to its base64 character */
static int encode_bits(int c)
{
	return pem_key[c];
}
14
/*
 * Inverse of encode_bits(): map a base64 character back to its 6-bit
 * value.  '=' (padding) decodes to 0 so callers only need a
 * non-negative result; anything else yields -EINVAL.
 */
static int decode_bits(char c)
{
	if ('A' <= c && c <= 'Z')
		return c - 'A';
	if ('a' <= c && c <= 'z')
		return 26 + (c - 'a');
	if ('0' <= c && c <= '9')
		return 52 + (c - '0');
	switch (c) {
	case '+':
		return 62;
	case '/':
		return 63;
	case '=':
		return 0;	/* just non-negative, please */
	}
	return -EINVAL;
}
31
/*
 * base64-encode [src, end) into dst, inserting a '\n' every 64 output
 * characters and '='-padding any trailing partial group.
 *
 * Returns the number of bytes written to dst.  dst must be large enough:
 * 4 output bytes per 3 input bytes, plus one newline per 64.
 */
int ceph_armor(char *dst, const char *src, const char *end)
{
	int olen = 0;
	int line = 0;	/* chars emitted on the current output line */

	while (src < end) {
		unsigned char a, b, c;

		/* each iteration consumes up to 3 bytes, emits exactly 4 */
		a = *src++;
		*dst++ = encode_bits(a >> 2);
		if (src < end) {
			b = *src++;
			*dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
			if (src < end) {
				c = *src++;
				*dst++ = encode_bits(((b & 15) << 2) |
						     (c >> 6));
				*dst++ = encode_bits(c & 63);
			} else {
				/* 2 input bytes left: one '=' pad */
				*dst++ = encode_bits((b & 15) << 2);
				*dst++ = '=';
			}
		} else {
			/* 1 input byte left: two '=' pads */
			*dst++ = encode_bits(((a & 3) << 4));
			*dst++ = '=';
			*dst++ = '=';
		}
		olen += 4;
		line += 4;
		if (line == 64) {
			line = 0;
			*(dst++) = '\n';
			olen++;
		}
	}
	return olen;
}
69
/*
 * base64-decode [src, end) into dst, tolerating a '\n' before each
 * 4-character group (the line breaks ceph_armor inserts).
 *
 * Returns the number of decoded bytes, or -EINVAL on a malformed input
 * (bad character, or length not a multiple of 4 after newlines).
 * dst must have room for 3 bytes per 4 input characters.
 */
int ceph_unarmor(char *dst, const char *src, const char *end)
{
	int olen = 0;

	while (src < end) {
		int a, b, c, d;

		if (src < end && src[0] == '\n')
			src++;
		if (src + 4 > end)
			return -EINVAL;
		a = decode_bits(src[0]);
		b = decode_bits(src[1]);
		c = decode_bits(src[2]);
		d = decode_bits(src[3]);
		if (a < 0 || b < 0 || c < 0 || d < 0)
			return -EINVAL;

		*dst++ = (a << 2) | (b >> 4);
		/* '=' padding marks the end: one pad -> 2 bytes short,
		 * two pads -> 1 byte short of a full 3-byte group */
		if (src[2] == '=')
			return olen + 1;
		*dst++ = ((b & 15) << 4) | (c >> 2);
		if (src[3] == '=')
			return olen + 2;
		*dst++ = ((c & 3) << 6) | d;
		olen += 3;
		src += 4;
	}
	return olen;
}
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
new file mode 100644
index 000000000000..abb204fea6c7
--- /dev/null
+++ b/fs/ceph/auth.c
@@ -0,0 +1,257 @@
1#include "ceph_debug.h"
2
3#include <linux/module.h>
4#include <linux/err.h>
5
6#include "types.h"
7#include "auth_none.h"
8#include "auth_x.h"
9#include "decode.h"
10#include "super.h"
11
12#include "messenger.h"
13
14/*
15 * get protocol handler
16 */
/* auth protocols we advertise to the monitor, in no particular order */
static u32 supported_protocols[] = {
	CEPH_AUTH_NONE,
	CEPH_AUTH_CEPHX
};
21
22int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
23{
24 switch (protocol) {
25 case CEPH_AUTH_NONE:
26 return ceph_auth_none_init(ac);
27 case CEPH_AUTH_CEPHX:
28 return ceph_x_init(ac);
29 default:
30 return -ENOENT;
31 }
32}
33
34/*
35 * setup, teardown.
36 */
37struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
38{
39 struct ceph_auth_client *ac;
40 int ret;
41
42 dout("auth_init name '%s' secret '%s'\n", name, secret);
43
44 ret = -ENOMEM;
45 ac = kzalloc(sizeof(*ac), GFP_NOFS);
46 if (!ac)
47 goto out;
48
49 ac->negotiating = true;
50 if (name)
51 ac->name = name;
52 else
53 ac->name = CEPH_AUTH_NAME_DEFAULT;
54 dout("auth_init name %s secret %s\n", ac->name, secret);
55 ac->secret = secret;
56 return ac;
57
58out:
59 return ERR_PTR(ret);
60}
61
/* tear down a client: let the protocol handler clean up, then free */
void ceph_auth_destroy(struct ceph_auth_client *ac)
{
	dout("auth_destroy %p\n", ac);
	if (ac->ops)
		ac->ops->destroy(ac);
	kfree(ac);
}
69
/*
 * Reset occurs when reconnecting to the monitor.
 *
 * Drop any per-session protocol state and go back to negotiating.
 */
void ceph_auth_reset(struct ceph_auth_client *ac)
{
	dout("auth_reset %p\n", ac);
	/* no reset needed if we never finished negotiating a protocol */
	if (ac->ops && !ac->negotiating)
		ac->ops->reset(ac);
	ac->negotiating = true;
}
80
/*
 * Encode a client entity name (type + length-prefixed string) at *p,
 * advancing *p.  Returns 0, or -ERANGE if it would run past @end.
 */
int ceph_entity_name_encode(const char *name, void **p, void *end)
{
	int len = strlen(name);

	/* type (u32) + length (u32) + name bytes must fit */
	if (*p + 2*sizeof(u32) + len > end)
		return -ERANGE;
	ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);
	ceph_encode_32(p, len);
	ceph_encode_copy(p, name, len);
	return 0;
}
92
93/*
94 * Initiate protocol negotiation with monitor. Include entity name
95 * and list supported protocols.
96 */
/*
 * Initiate protocol negotiation with monitor.  Include entity name
 * and list supported protocols.
 *
 * Fills @buf (which begins with a ceph_mon_request_header) and returns
 * the number of bytes written, or -ERANGE if @len is too small.
 */
int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
{
	struct ceph_mon_request_header *monhdr = buf;
	void *p = monhdr + 1, *end = buf + len, *lenp;
	int i, num;
	int ret;

	dout("auth_build_hello\n");
	monhdr->have_version = 0;
	monhdr->session_mon = cpu_to_le16(-1);
	monhdr->session_mon_tid = 0;

	ceph_encode_32(&p, 0);  /* no protocol, yet */

	/* remember where the payload length goes; patched in at the end */
	lenp = p;
	p += sizeof(u32);

	/* ceph_decode_need is (ab)used here as a bounds check on encoding */
	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
	ceph_encode_8(&p, 1);
	num = ARRAY_SIZE(supported_protocols);
	ceph_encode_32(&p, num);
	ceph_decode_need(&p, end, num * sizeof(u32), bad);
	for (i = 0; i < num; i++)
		ceph_encode_32(&p, supported_protocols[i]);

	ret = ceph_entity_name_encode(ac->name, &p, end);
	if (ret < 0)
		return ret;
	ceph_decode_need(&p, end, sizeof(u64), bad);
	ceph_encode_64(&p, ac->global_id);

	/* back-patch the payload length we reserved above */
	ceph_encode_32(&lenp, p - lenp - sizeof(u32));
	return p - buf;

bad:
	return -ERANGE;
}
134
/*
 * Build a protocol-specific auth request into @msg_buf (which begins
 * with a ceph_mon_request_header).  Returns the total message length,
 * or a negative error from the protocol handler.
 */
int ceph_build_auth_request(struct ceph_auth_client *ac,
			    void *msg_buf, size_t msg_len)
{
	struct ceph_mon_request_header *monhdr = msg_buf;
	void *p = monhdr + 1;
	void *end = msg_buf + msg_len;
	int ret;

	monhdr->have_version = 0;
	monhdr->session_mon = cpu_to_le16(-1);
	monhdr->session_mon_tid = 0;

	ceph_encode_32(&p, ac->protocol);

	/* payload goes after a u32 length we fill in below */
	ret = ac->ops->build_request(ac, p + sizeof(u32), end);
	if (ret < 0) {
		pr_err("error %d building request\n", ret);
		return ret;
	}
	dout(" built request %d bytes\n", ret);
	ceph_encode_32(&p, ret);
	return p + ret - msg_buf;
}
158
159/*
160 * Handle auth message from monitor.
161 */
/*
 * Handle auth message from monitor.
 *
 * Decodes the reply, finishes protocol negotiation if still under way,
 * and passes the payload to the protocol handler.  If the handler wants
 * another round (-EAGAIN) a follow-up request is built into @reply_buf
 * and its length returned; otherwise returns 0 on success or a negative
 * error.
 */
int ceph_handle_auth_reply(struct ceph_auth_client *ac,
			   void *buf, size_t len,
			   void *reply_buf, size_t reply_len)
{
	void *p = buf;
	void *end = buf + len;
	int protocol;
	s32 result;
	u64 global_id;
	void *payload, *payload_end;
	int payload_len;
	char *result_msg;
	int result_msg_len;
	int ret = -EINVAL;

	dout("handle_auth_reply %p %p\n", p, end);
	/* wire format: protocol, result, global_id, payload, result string */
	ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
	protocol = ceph_decode_32(&p);
	result = ceph_decode_32(&p);
	global_id = ceph_decode_64(&p);
	payload_len = ceph_decode_32(&p);
	payload = p;
	p += payload_len;
	ceph_decode_need(&p, end, sizeof(u32), bad);
	result_msg_len = ceph_decode_32(&p);
	result_msg = p;
	p += result_msg_len;
	if (p != end)	/* reject trailing garbage */
		goto bad;

	dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len,
	     result_msg, global_id, payload_len);

	payload_end = payload + payload_len;

	if (global_id && ac->global_id != global_id) {
		dout(" set global_id %lld -> %lld\n", ac->global_id, global_id);
		ac->global_id = global_id;
	}

	if (ac->negotiating) {
		/* server does not support our protocols? */
		if (!protocol && result < 0) {
			ret = result;
			goto out;
		}
		/* set up (new) protocol handler? */
		if (ac->protocol && ac->protocol != protocol) {
			ac->ops->destroy(ac);
			ac->protocol = 0;
			ac->ops = NULL;
		}
		if (ac->protocol != protocol) {
			ret = ceph_auth_init_protocol(ac, protocol);
			if (ret) {
				pr_err("error %d on auth protocol %d init\n",
				       ret, protocol);
				goto out;
			}
		}

		ac->negotiating = false;
	}

	ret = ac->ops->handle_reply(ac, result, payload, payload_end);
	if (ret == -EAGAIN) {
		/* handler wants another round trip */
		return ceph_build_auth_request(ac, reply_buf, reply_len);
	} else if (ret) {
		pr_err("authentication error %d\n", ret);
		return ret;
	}
	return 0;

bad:
	pr_err("failed to decode auth msg\n");
out:
	return ret;
}
240
241int ceph_build_auth(struct ceph_auth_client *ac,
242 void *msg_buf, size_t msg_len)
243{
244 if (!ac->protocol)
245 return ceph_auth_build_hello(ac, msg_buf, msg_len);
246 BUG_ON(!ac->ops);
247 if (!ac->ops->is_authenticated(ac))
248 return ceph_build_auth_request(ac, msg_buf, msg_len);
249 return 0;
250}
251
252int ceph_auth_is_authenticated(struct ceph_auth_client *ac)
253{
254 if (!ac->ops)
255 return 0;
256 return ac->ops->is_authenticated(ac);
257}
diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h
new file mode 100644
index 000000000000..ca4f57cfb267
--- /dev/null
+++ b/fs/ceph/auth.h
@@ -0,0 +1,84 @@
#ifndef _FS_CEPH_AUTH_H
#define _FS_CEPH_AUTH_H

#include "types.h"
#include "buffer.h"

/*
 * Abstract interface for communicating with the authenticate module.
 * There is some handshake that takes place between us and the monitor
 * to acquire the necessary keys.  These are used to generate an
 * 'authorizer' that we use when connecting to a service (mds, osd).
 */

struct ceph_auth_client;
struct ceph_authorizer;

struct ceph_auth_client_ops {
	/*
	 * true if we are authenticated and can connect to
	 * services.
	 */
	int (*is_authenticated)(struct ceph_auth_client *ac);

	/*
	 * build requests and process replies during monitor
	 * handshake.  if handle_reply returns -EAGAIN, we build
	 * another request.
	 */
	int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
	int (*handle_reply)(struct ceph_auth_client *ac, int result,
			    void *buf, void *end);

	/*
	 * Create authorizer for connecting to a service, and verify
	 * the response to authenticate the service.
	 */
	int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
				 struct ceph_authorizer **a,
				 void **buf, size_t *len,
				 void **reply_buf, size_t *reply_len);
	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
				       struct ceph_authorizer *a, size_t len);
	void (*destroy_authorizer)(struct ceph_auth_client *ac,
				   struct ceph_authorizer *a);
	/* forget any cached authorizer/ticket for peer_type, forcing a
	 * fresh one on the next connection attempt */
	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
				      int peer_type);

	/* reset when we (re)connect to a monitor */
	void (*reset)(struct ceph_auth_client *ac);

	/* release all protocol-private state attached to ac->private */
	void (*destroy)(struct ceph_auth_client *ac);
};

struct ceph_auth_client {
	u32 protocol;           /* CEPH_AUTH_* */
	void *private;          /* for use by protocol implementation */
	const struct ceph_auth_client_ops *ops;  /* null iff protocol==0 */

	bool negotiating;       /* true if negotiating protocol */
	const char *name;       /* entity name */
	u64 global_id;          /* our unique id in system */
	const char *secret;     /* our secret key */
	unsigned want_keys;     /* which services we want */
};

extern struct ceph_auth_client *ceph_auth_init(const char *name,
					       const char *secret);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);

extern void ceph_auth_reset(struct ceph_auth_client *ac);

extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
				 void *buf, size_t len);
extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
				  void *buf, size_t len,
				  void *reply_buf, size_t reply_len);
extern int ceph_entity_name_encode(const char *name, void **p, void *end);

extern int ceph_build_auth(struct ceph_auth_client *ac,
			   void *msg_buf, size_t msg_len);

extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);

#endif
diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c
new file mode 100644
index 000000000000..b4ef6f0a6c85
--- /dev/null
+++ b/fs/ceph/auth_none.c
@@ -0,0 +1,121 @@
1
2#include "ceph_debug.h"
3
4#include <linux/err.h>
5#include <linux/module.h>
6#include <linux/random.h>
7
8#include "auth_none.h"
9#include "auth.h"
10#include "decode.h"
11
12static void reset(struct ceph_auth_client *ac)
13{
14 struct ceph_auth_none_info *xi = ac->private;
15
16 xi->starting = true;
17 xi->built_authorizer = false;
18}
19
20static void destroy(struct ceph_auth_client *ac)
21{
22 kfree(ac->private);
23 ac->private = NULL;
24}
25
26static int is_authenticated(struct ceph_auth_client *ac)
27{
28 struct ceph_auth_none_info *xi = ac->private;
29
30 return !xi->starting;
31}
32
33/*
34 * the generic auth code decode the global_id, and we carry no actual
35 * authenticate state, so nothing happens here.
36 */
37static int handle_reply(struct ceph_auth_client *ac, int result,
38 void *buf, void *end)
39{
40 struct ceph_auth_none_info *xi = ac->private;
41
42 xi->starting = false;
43 return result;
44}
45
/*
 * build an 'authorizer' with our entity_name and global_id.  we can
 * reuse a single static copy since it is identical for all services
 * we connect to.
 *
 * On success, hands the caller pointers into ai->au (no ownership
 * transfer) plus a zero-length reply buffer.
 */
static int ceph_auth_none_create_authorizer(
	struct ceph_auth_client *ac, int peer_type,
	struct ceph_authorizer **a,
	void **buf, size_t *len,
	void **reply_buf, size_t *reply_len)
{
	struct ceph_auth_none_info *ai = ac->private;
	struct ceph_none_authorizer *au = &ai->au;
	void *p, *end;
	int ret;

	if (!ai->built_authorizer) {
		p = au->buf;
		end = p + sizeof(au->buf);
		ceph_encode_8(&p, 1);	/* struct_v */
		/* encode the name against end-8, reserving room for the
		 * u64 global_id below -- NOTE(review): 8 == sizeof(u64),
		 * confirm that is the intent of the magic constant */
		ret = ceph_entity_name_encode(ac->name, &p, end - 8);
		if (ret < 0)
			goto bad;
		/* bounds check before encoding; the decode_need macro
		 * doubles as a "space remaining" test */
		ceph_decode_need(&p, end, sizeof(u64), bad2);
		ceph_encode_64(&p, ac->global_id);
		au->buf_len = p - (void *)au->buf;
		ai->built_authorizer = true;
		dout("built authorizer len %d\n", au->buf_len);
	}

	*a = (struct ceph_authorizer *)au;
	*buf = au->buf;
	*len = au->buf_len;
	*reply_buf = au->reply_buf;
	*reply_len = sizeof(au->reply_buf);	/* 0: no reply expected */
	return 0;

bad2:
	ret = -ERANGE;
bad:
	return ret;
}
88
/*
 * The "none" authorizer lives inside ceph_auth_none_info and is reused
 * for every service, so there is nothing per-authorizer to free.
 */
static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
				      struct ceph_authorizer *a)
{
	/* nothing to do */
}
94
/*
 * "none" protocol ops vector.  build_request, verify_authorizer_reply
 * and invalidate_authorizer are intentionally left NULL; callers of
 * those hooks must tolerate that for this protocol.
 */
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
	.reset = reset,
	.destroy = destroy,
	.is_authenticated = is_authenticated,
	.handle_reply = handle_reply,
	.create_authorizer = ceph_auth_none_create_authorizer,
	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
};
103
104int ceph_auth_none_init(struct ceph_auth_client *ac)
105{
106 struct ceph_auth_none_info *xi;
107
108 dout("ceph_auth_none_init %p\n", ac);
109 xi = kzalloc(sizeof(*xi), GFP_NOFS);
110 if (!xi)
111 return -ENOMEM;
112
113 xi->starting = true;
114 xi->built_authorizer = false;
115
116 ac->protocol = CEPH_AUTH_NONE;
117 ac->private = xi;
118 ac->ops = &ceph_auth_none_ops;
119 return 0;
120}
121
diff --git a/fs/ceph/auth_none.h b/fs/ceph/auth_none.h
new file mode 100644
index 000000000000..56c05533a31c
--- /dev/null
+++ b/fs/ceph/auth_none.h
@@ -0,0 +1,28 @@
#ifndef _FS_CEPH_AUTH_NONE_H
#define _FS_CEPH_AUTH_NONE_H

#include "auth.h"

/*
 * null security mode.
 *
 * we use a single static authorizer that simply encodes our entity name
 * and global id.
 */

struct ceph_none_authorizer {
	char buf[128];		/* encoded entity name + global_id */
	int buf_len;		/* valid bytes in buf */
	char reply_buf[0];	/* zero length: no reply payload expected */
};

struct ceph_auth_none_info {
	bool starting;		/* true until the first monitor reply */
	bool built_authorizer;	/* au below is encoded and reusable */
	struct ceph_none_authorizer au;   /* we only need one; it's static */
};

extern int ceph_auth_none_init(struct ceph_auth_client *ac);

#endif

diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
new file mode 100644
index 000000000000..f0318427b6da
--- /dev/null
+++ b/fs/ceph/auth_x.c
@@ -0,0 +1,656 @@
1
2#include "ceph_debug.h"
3
4#include <linux/err.h>
5#include <linux/module.h>
6#include <linux/random.h>
7
8#include "auth_x.h"
9#include "auth_x_protocol.h"
10#include "crypto.h"
11#include "auth.h"
12#include "decode.h"
13
/* slab cache for temporary ticket decode buffers.
 * NOTE(review): file-global and not static -- confirm no other
 * translation unit uses it, and see ceph_x_init/ceph_x_destroy for
 * its (per-client!) create/destroy lifecycle. */
struct kmem_cache *ceph_x_ticketbuf_cachep;

#define TEMP_TICKET_BUF_LEN	256

static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
19
20static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
21{
22 struct ceph_x_info *xi = ac->private;
23 int need;
24
25 ceph_x_validate_tickets(ac, &need);
26 dout("ceph_x_is_authenticated want=%d need=%d have=%d\n",
27 ac->want_keys, need, xi->have_keys);
28 return (ac->want_keys & xi->have_keys) == ac->want_keys;
29}
30
/*
 * Encrypt ibuf/ilen with @secret into obuf, prepending the standard
 * ceph_x_encrypt_header to the plaintext.  On-wire layout in obuf is
 * a __le32 ciphertext length followed by the ciphertext.  Returns the
 * total bytes written (length word included) or a negative error.
 */
static int ceph_x_encrypt(struct ceph_crypto_key *secret,
			  void *ibuf, int ilen, void *obuf, size_t olen)
{
	struct ceph_x_encrypt_header head = {
		.struct_v = 1,
		.magic = cpu_to_le64(CEPHX_ENC_MAGIC)
	};
	size_t len = olen - sizeof(u32);	/* room after the length word */
	int ret;

	ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len,
			    &head, sizeof(head), ibuf, ilen);
	if (ret)
		return ret;
	/* write the length word; this advances only the local obuf copy */
	ceph_encode_32(&obuf, len);
	return len + sizeof(u32);
}
48
/*
 * Decrypt a __le32-length-prefixed ciphertext at *p (bounded by end)
 * into obuf/olen, verifying the embedded header version and magic.
 * Advances *p past the consumed ciphertext.  Returns the plaintext
 * length, -EINVAL on short input, or -EPERM on a bad header.
 *
 * NOTE(review): assumes the caller guaranteed at least 4 readable
 * bytes at *p for the length word -- confirm at call sites.
 */
static int ceph_x_decrypt(struct ceph_crypto_key *secret,
			  void **p, void *end, void *obuf, size_t olen)
{
	struct ceph_x_encrypt_header head;
	size_t head_len = sizeof(head);
	int len, ret;

	len = ceph_decode_32(p);
	if (*p + len > end)
		return -EINVAL;

	dout("ceph_x_decrypt len %d\n", len);
	ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
			    *p, len);
	if (ret)
		return ret;
	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
		return -EPERM;
	*p += len;
	return olen;
}
70
71/*
72 * get existing (or insert new) ticket handler
73 */
74struct ceph_x_ticket_handler *get_ticket_handler(struct ceph_auth_client *ac,
75 int service)
76{
77 struct ceph_x_ticket_handler *th;
78 struct ceph_x_info *xi = ac->private;
79 struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node;
80
81 while (*p) {
82 parent = *p;
83 th = rb_entry(parent, struct ceph_x_ticket_handler, node);
84 if (service < th->service)
85 p = &(*p)->rb_left;
86 else if (service > th->service)
87 p = &(*p)->rb_right;
88 else
89 return th;
90 }
91
92 /* add it */
93 th = kzalloc(sizeof(*th), GFP_NOFS);
94 if (!th)
95 return ERR_PTR(-ENOMEM);
96 th->service = service;
97 rb_link_node(&th->node, parent, p);
98 rb_insert_color(&th->node, &xi->ticket_handlers);
99 return th;
100}
101
/*
 * Unlink @th from the rbtree and free it together with its session
 * key and (if present) its ticket blob reference.
 */
static void remove_ticket_handler(struct ceph_auth_client *ac,
				  struct ceph_x_ticket_handler *th)
{
	struct ceph_x_info *xi = ac->private;

	dout("remove_ticket_handler %p %d\n", th, th->service);
	rb_erase(&th->node, &xi->ticket_handlers);
	ceph_crypto_key_destroy(&th->session_key);
	if (th->ticket_blob)
		ceph_buffer_put(th->ticket_blob);
	kfree(th);
}
114
115static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
116 struct ceph_crypto_key *secret,
117 void *buf, void *end)
118{
119 struct ceph_x_info *xi = ac->private;
120 int num;
121 void *p = buf;
122 int ret;
123 char *dbuf;
124 char *ticket_buf;
125 u8 struct_v;
126
127 dbuf = kmem_cache_alloc(ceph_x_ticketbuf_cachep, GFP_NOFS | GFP_ATOMIC);
128 if (!dbuf)
129 return -ENOMEM;
130
131 ret = -ENOMEM;
132 ticket_buf = kmem_cache_alloc(ceph_x_ticketbuf_cachep,
133 GFP_NOFS | GFP_ATOMIC);
134 if (!ticket_buf)
135 goto out_dbuf;
136
137 ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
138 struct_v = ceph_decode_8(&p);
139 if (struct_v != 1)
140 goto bad;
141 num = ceph_decode_32(&p);
142 dout("%d tickets\n", num);
143 while (num--) {
144 int type;
145 u8 struct_v;
146 struct ceph_x_ticket_handler *th;
147 void *dp, *dend;
148 int dlen;
149 char is_enc;
150 struct timespec validity;
151 struct ceph_crypto_key old_key;
152 void *tp, *tpend;
153
154 ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
155
156 type = ceph_decode_32(&p);
157 dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
158
159 struct_v = ceph_decode_8(&p);
160 if (struct_v != 1)
161 goto bad;
162
163 th = get_ticket_handler(ac, type);
164 if (IS_ERR(th)) {
165 ret = PTR_ERR(th);
166 goto out;
167 }
168
169 /* blob for me */
170 dlen = ceph_x_decrypt(secret, &p, end, dbuf,
171 TEMP_TICKET_BUF_LEN);
172 if (dlen <= 0) {
173 ret = dlen;
174 goto out;
175 }
176 dout(" decrypted %d bytes\n", dlen);
177 dend = dbuf + dlen;
178 dp = dbuf;
179
180 struct_v = ceph_decode_8(&dp);
181 if (struct_v != 1)
182 goto bad;
183
184 memcpy(&old_key, &th->session_key, sizeof(old_key));
185 ret = ceph_crypto_key_decode(&th->session_key, &dp, dend);
186 if (ret)
187 goto out;
188
189 ceph_decode_copy(&dp, &th->validity, sizeof(th->validity));
190 ceph_decode_timespec(&validity, &th->validity);
191 th->expires = get_seconds() + validity.tv_sec;
192 th->renew_after = th->expires - (validity.tv_sec / 4);
193 dout(" expires=%lu renew_after=%lu\n", th->expires,
194 th->renew_after);
195
196 /* ticket blob for service */
197 ceph_decode_8_safe(&p, end, is_enc, bad);
198 tp = ticket_buf;
199 if (is_enc) {
200 /* encrypted */
201 dout(" encrypted ticket\n");
202 dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
203 TEMP_TICKET_BUF_LEN);
204 if (dlen < 0) {
205 ret = dlen;
206 goto out;
207 }
208 dlen = ceph_decode_32(&tp);
209 } else {
210 /* unencrypted */
211 ceph_decode_32_safe(&p, end, dlen, bad);
212 ceph_decode_need(&p, end, dlen, bad);
213 ceph_decode_copy(&p, ticket_buf, dlen);
214 }
215 tpend = tp + dlen;
216 dout(" ticket blob is %d bytes\n", dlen);
217 ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
218 struct_v = ceph_decode_8(&tp);
219 th->secret_id = ceph_decode_64(&tp);
220 ret = ceph_decode_buffer(&th->ticket_blob, &tp, tpend);
221 if (ret)
222 goto out;
223 dout(" got ticket service %d (%s) secret_id %lld len %d\n",
224 type, ceph_entity_type_name(type), th->secret_id,
225 (int)th->ticket_blob->vec.iov_len);
226 xi->have_keys |= th->service;
227 }
228
229 ret = 0;
230out:
231 kmem_cache_free(ceph_x_ticketbuf_cachep, ticket_buf);
232out_dbuf:
233 kmem_cache_free(ceph_x_ticketbuf_cachep, dbuf);
234 return ret;
235
236bad:
237 ret = -EINVAL;
238 goto out;
239}
240
/*
 * Build an authorizer for @th's service into @au: an unencrypted "a"
 * part (global_id, service id, ticket blob) followed by a "b" part
 * (a fresh random nonce) encrypted with the ticket's session key.
 * au->buf is reused across calls if large enough.  Returns 0 or a
 * negative error; on encrypt failure au->buf is released.
 */
static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
				   struct ceph_x_ticket_handler *th,
				   struct ceph_x_authorizer *au)
{
	int len;
	struct ceph_x_authorize_a *msg_a;
	struct ceph_x_authorize_b msg_b;
	void *p, *end;
	int ret;
	int ticket_blob_len =
		(th->ticket_blob ? th->ticket_blob->vec.iov_len : 0);

	dout("build_authorizer for %s %p\n",
	     ceph_entity_type_name(th->service), au);

	/* NOTE(review): +16 is presumably slack for encryption padding
	 * and the length word -- confirm it covers the cipher's block
	 * size */
	len = sizeof(*msg_a) + sizeof(msg_b) + sizeof(u32) +
		ticket_blob_len + 16;
	dout(" need len %d\n", len);
	if (au->buf && au->buf->alloc_len < len) {
		ceph_buffer_put(au->buf);
		au->buf = NULL;
	}
	if (!au->buf) {
		au->buf = ceph_buffer_new(len, GFP_NOFS);
		if (!au->buf)
			return -ENOMEM;
	}
	au->service = th->service;

	msg_a = au->buf->vec.iov_base;
	msg_a->struct_v = 1;
	msg_a->global_id = cpu_to_le64(ac->global_id);
	msg_a->service_id = cpu_to_le32(th->service);
	msg_a->ticket_blob.struct_v = 1;
	msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id);
	msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len);
	if (ticket_blob_len) {
		memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base,
		       th->ticket_blob->vec.iov_len);
	}
	dout(" th %p secret_id %lld %lld\n", th, th->secret_id,
	     le64_to_cpu(msg_a->ticket_blob.secret_id));

	/* msg_a ends in a flexible blob[] tail; step past the struct
	 * and then past the blob bytes just copied */
	p = msg_a + 1;
	p += ticket_blob_len;
	end = au->buf->vec.iov_base + au->buf->vec.iov_len;

	get_random_bytes(&au->nonce, sizeof(au->nonce));
	msg_b.struct_v = 1;
	msg_b.nonce = cpu_to_le64(au->nonce);
	ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
			     p, end - p);
	if (ret < 0)
		goto out_buf;
	p += ret;
	au->buf->vec.iov_len = p - au->buf->vec.iov_base;
	dout(" built authorizer nonce %llx len %d\n", au->nonce,
	     (int)au->buf->vec.iov_len);
	return 0;

out_buf:
	ceph_buffer_put(au->buf);
	au->buf = NULL;
	return ret;
}
306
/*
 * Encode @th's secret_id and ticket blob at *p (bounded by end),
 * advancing *p.  Returns 0 or -ERANGE if the buffer is too small.
 * Note that ceph_decode_need() is used here purely as a bounds check
 * on an *encode* path: it jumps to "bad" if fewer than 1+8 bytes
 * remain.
 */
static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th,
				void **p, void *end)
{
	ceph_decode_need(p, end, 1 + sizeof(u64), bad);
	ceph_encode_8(p, 1);	/* struct_v */
	ceph_encode_64(p, th->secret_id);
	if (th->ticket_blob) {
		const char *buf = th->ticket_blob->vec.iov_base;
		u32 len = th->ticket_blob->vec.iov_len;

		ceph_encode_32_safe(p, end, len, bad);
		ceph_encode_copy_safe(p, end, buf, len, bad);
	} else {
		/* no ticket yet: encode an empty blob */
		ceph_encode_32_safe(p, end, 0, bad);
	}

	return 0;
bad:
	return -ERANGE;
}
327
328static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
329{
330 int want = ac->want_keys;
331 struct ceph_x_info *xi = ac->private;
332 int service;
333
334 *pneed = ac->want_keys & ~(xi->have_keys);
335
336 for (service = 1; service <= want; service <<= 1) {
337 struct ceph_x_ticket_handler *th;
338
339 if (!(ac->want_keys & service))
340 continue;
341
342 if (*pneed & service)
343 continue;
344
345 th = get_ticket_handler(ac, service);
346
347 if (!th) {
348 *pneed |= service;
349 continue;
350 }
351
352 if (get_seconds() >= th->renew_after)
353 *pneed |= service;
354 if (get_seconds() >= th->expires)
355 xi->have_keys &= ~service;
356 }
357}
358
359
360static int ceph_x_build_request(struct ceph_auth_client *ac,
361 void *buf, void *end)
362{
363 struct ceph_x_info *xi = ac->private;
364 int need;
365 struct ceph_x_request_header *head = buf;
366 int ret;
367 struct ceph_x_ticket_handler *th =
368 get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
369
370 ceph_x_validate_tickets(ac, &need);
371
372 dout("build_request want %x have %x need %x\n",
373 ac->want_keys, xi->have_keys, need);
374
375 if (need & CEPH_ENTITY_TYPE_AUTH) {
376 struct ceph_x_authenticate *auth = (void *)(head + 1);
377 void *p = auth + 1;
378 struct ceph_x_challenge_blob tmp;
379 char tmp_enc[40];
380 u64 *u;
381
382 if (p > end)
383 return -ERANGE;
384
385 dout(" get_auth_session_key\n");
386 head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY);
387
388 /* encrypt and hash */
389 get_random_bytes(&auth->client_challenge, sizeof(u64));
390 tmp.client_challenge = auth->client_challenge;
391 tmp.server_challenge = cpu_to_le64(xi->server_challenge);
392 ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp),
393 tmp_enc, sizeof(tmp_enc));
394 if (ret < 0)
395 return ret;
396
397 auth->struct_v = 1;
398 auth->key = 0;
399 for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
400 auth->key ^= *u;
401 dout(" server_challenge %llx client_challenge %llx key %llx\n",
402 xi->server_challenge, le64_to_cpu(auth->client_challenge),
403 le64_to_cpu(auth->key));
404
405 /* now encode the old ticket if exists */
406 ret = ceph_x_encode_ticket(th, &p, end);
407 if (ret < 0)
408 return ret;
409
410 return p - buf;
411 }
412
413 if (need) {
414 void *p = head + 1;
415 struct ceph_x_service_ticket_request *req;
416
417 if (p > end)
418 return -ERANGE;
419 head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
420
421 BUG_ON(!th);
422 ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
423 if (ret)
424 return ret;
425 ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base,
426 xi->auth_authorizer.buf->vec.iov_len);
427
428 req = p;
429 req->keys = cpu_to_le32(need);
430 p += sizeof(*req);
431 return p - buf;
432 }
433
434 return 0;
435}
436
437static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
438 void *buf, void *end)
439{
440 struct ceph_x_info *xi = ac->private;
441 struct ceph_x_reply_header *head = buf;
442 struct ceph_x_ticket_handler *th;
443 int len = end - buf;
444 int op;
445 int ret;
446
447 if (result)
448 return result; /* XXX hmm? */
449
450 if (xi->starting) {
451 /* it's a hello */
452 struct ceph_x_server_challenge *sc = buf;
453
454 if (len != sizeof(*sc))
455 return -EINVAL;
456 xi->server_challenge = le64_to_cpu(sc->server_challenge);
457 dout("handle_reply got server challenge %llx\n",
458 xi->server_challenge);
459 xi->starting = false;
460 xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH;
461 return -EAGAIN;
462 }
463
464 op = le32_to_cpu(head->op);
465 result = le32_to_cpu(head->result);
466 dout("handle_reply op %d result %d\n", op, result);
467 switch (op) {
468 case CEPHX_GET_AUTH_SESSION_KEY:
469 /* verify auth key */
470 ret = ceph_x_proc_ticket_reply(ac, &xi->secret,
471 buf + sizeof(*head), end);
472 break;
473
474 case CEPHX_GET_PRINCIPAL_SESSION_KEY:
475 th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
476 BUG_ON(!th);
477 ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
478 buf + sizeof(*head), end);
479 break;
480
481 default:
482 return -EINVAL;
483 }
484 if (ret)
485 return ret;
486 if (ac->want_keys == xi->have_keys)
487 return 0;
488 return -EAGAIN;
489}
490
491static int ceph_x_create_authorizer(
492 struct ceph_auth_client *ac, int peer_type,
493 struct ceph_authorizer **a,
494 void **buf, size_t *len,
495 void **reply_buf, size_t *reply_len)
496{
497 struct ceph_x_authorizer *au;
498 struct ceph_x_ticket_handler *th;
499 int ret;
500
501 th = get_ticket_handler(ac, peer_type);
502 if (IS_ERR(th))
503 return PTR_ERR(th);
504
505 au = kzalloc(sizeof(*au), GFP_NOFS);
506 if (!au)
507 return -ENOMEM;
508
509 ret = ceph_x_build_authorizer(ac, th, au);
510 if (ret) {
511 kfree(au);
512 return ret;
513 }
514
515 *a = (struct ceph_authorizer *)au;
516 *buf = au->buf->vec.iov_base;
517 *len = au->buf->vec.iov_len;
518 *reply_buf = au->reply_buf;
519 *reply_len = sizeof(au->reply_buf);
520 return 0;
521}
522
523static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
524 struct ceph_authorizer *a, size_t len)
525{
526 struct ceph_x_authorizer *au = (void *)a;
527 struct ceph_x_ticket_handler *th;
528 int ret = 0;
529 struct ceph_x_authorize_reply reply;
530 void *p = au->reply_buf;
531 void *end = p + sizeof(au->reply_buf);
532
533 th = get_ticket_handler(ac, au->service);
534 if (!th)
535 return -EIO; /* hrm! */
536 ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
537 if (ret < 0)
538 return ret;
539 if (ret != sizeof(reply))
540 return -EPERM;
541
542 if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
543 ret = -EPERM;
544 else
545 ret = 0;
546 dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
547 au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
548 return ret;
549}
550
551static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
552 struct ceph_authorizer *a)
553{
554 struct ceph_x_authorizer *au = (void *)a;
555
556 ceph_buffer_put(au->buf);
557 kfree(au);
558}
559
560
561static void ceph_x_reset(struct ceph_auth_client *ac)
562{
563 struct ceph_x_info *xi = ac->private;
564
565 dout("reset\n");
566 xi->starting = true;
567 xi->server_challenge = 0;
568}
569
570static void ceph_x_destroy(struct ceph_auth_client *ac)
571{
572 struct ceph_x_info *xi = ac->private;
573 struct rb_node *p;
574
575 dout("ceph_x_destroy %p\n", ac);
576 ceph_crypto_key_destroy(&xi->secret);
577
578 while ((p = rb_first(&xi->ticket_handlers)) != NULL) {
579 struct ceph_x_ticket_handler *th =
580 rb_entry(p, struct ceph_x_ticket_handler, node);
581 remove_ticket_handler(ac, th);
582 }
583
584 kmem_cache_destroy(ceph_x_ticketbuf_cachep);
585
586 kfree(ac->private);
587 ac->private = NULL;
588}
589
/*
 * Drop any cached ticket for @peer_type so the next connection
 * attempt performs a fresh handshake.
 */
static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
					 int peer_type)
{
	struct ceph_x_ticket_handler *th;

	th = get_ticket_handler(ac, peer_type);
	/* get_ticket_handler() returns ERR_PTR(-ENOMEM) or a valid
	 * handler, never NULL; the extra "th &&" test was redundant */
	if (!IS_ERR(th))
		remove_ticket_handler(ac, th);
}
599
600
/* cephx protocol ops vector; see struct ceph_auth_client_ops in auth.h */
static const struct ceph_auth_client_ops ceph_x_ops = {
	.is_authenticated = ceph_x_is_authenticated,
	.build_request = ceph_x_build_request,
	.handle_reply = ceph_x_handle_reply,
	.create_authorizer = ceph_x_create_authorizer,
	.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
	.destroy_authorizer = ceph_x_destroy_authorizer,
	.invalidate_authorizer = ceph_x_invalidate_authorizer,
	.reset = ceph_x_reset,
	.destroy = ceph_x_destroy,
};
612
613
/*
 * Set up the cephx protocol on @ac: allocate private state, create the
 * (file-global) ticket buffer cache, and unarmor the shared secret.
 * Returns 0, -ENOMEM, -EINVAL (no secret), or a key-decode error.
 *
 * NOTE(review): ceph_x_ticketbuf_cachep is a single global created
 * here and destroyed in ceph_x_destroy(); if multiple clients can use
 * cephx concurrently this will collide -- confirm single-client use.
 */
int ceph_x_init(struct ceph_auth_client *ac)
{
	struct ceph_x_info *xi;
	int ret;

	dout("ceph_x_init %p\n", ac);
	xi = kzalloc(sizeof(*xi), GFP_NOFS);
	if (!xi)
		return -ENOMEM;

	ret = -ENOMEM;
	ceph_x_ticketbuf_cachep = kmem_cache_create("ceph_x_ticketbuf",
				      TEMP_TICKET_BUF_LEN, 8,
				      (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
				      NULL);
	if (!ceph_x_ticketbuf_cachep)
		goto done_nomem;
	ret = -EINVAL;
	if (!ac->secret) {
		pr_err("no secret set (for auth_x protocol)\n");
		goto done_nomem;
	}

	ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret);
	if (ret)
		goto done_nomem;

	xi->starting = true;
	xi->ticket_handlers = RB_ROOT;

	ac->protocol = CEPH_AUTH_CEPHX;
	ac->private = xi;
	ac->ops = &ceph_x_ops;
	return 0;

done_nomem:
	/* NOTE(review): the cache is destroyed but the global pointer
	 * is left dangling (not reset to NULL) -- confirm nothing
	 * NULL-checks it afterwards */
	kfree(xi);
	if (ceph_x_ticketbuf_cachep)
		kmem_cache_destroy(ceph_x_ticketbuf_cachep);
	return ret;
}
655
656
diff --git a/fs/ceph/auth_x.h b/fs/ceph/auth_x.h
new file mode 100644
index 000000000000..ff6f8180e681
--- /dev/null
+++ b/fs/ceph/auth_x.h
@@ -0,0 +1,49 @@
#ifndef _FS_CEPH_AUTH_X_H
#define _FS_CEPH_AUTH_X_H

#include <linux/rbtree.h>

#include "crypto.h"
#include "auth.h"
#include "auth_x_protocol.h"

/*
 * Handle ticket for a single service.
 */
struct ceph_x_ticket_handler {
	struct rb_node node;		/* keyed by service in xi->ticket_handlers */
	unsigned service;		/* CEPH_ENTITY_TYPE_* bit */

	struct ceph_crypto_key session_key;
	struct ceph_timespec validity;	/* on-wire validity duration */

	u64 secret_id;			/* server-side key id for the blob */
	struct ceph_buffer *ticket_blob; /* opaque ticket for the service */

	/* local deadlines derived from validity, in get_seconds() time */
	unsigned long renew_after, expires;
};


struct ceph_x_authorizer {
	struct ceph_buffer *buf;	/* encoded authorizer (a + encrypted b) */
	unsigned service;
	u64 nonce;			/* random challenge; reply must be nonce+1 */
	char reply_buf[128];  /* big enough for encrypted blob */
};

struct ceph_x_info {
	struct ceph_crypto_key secret;	/* unarmored shared secret */

	bool starting;			/* true until server hello seen */
	u64 server_challenge;

	unsigned have_keys;		/* mask of services with valid tickets */
	struct rb_root ticket_handlers;

	/* cached authorizer for talking to the monitor/auth service */
	struct ceph_x_authorizer auth_authorizer;
};

extern int ceph_x_init(struct ceph_auth_client *ac);

#endif

diff --git a/fs/ceph/auth_x_protocol.h b/fs/ceph/auth_x_protocol.h
new file mode 100644
index 000000000000..671d30576c4f
--- /dev/null
+++ b/fs/ceph/auth_x_protocol.h
@@ -0,0 +1,90 @@
#ifndef __FS_CEPH_AUTH_X_PROTOCOL
#define __FS_CEPH_AUTH_X_PROTOCOL

/* cephx request op codes (ceph_x_request_header.op) */
#define CEPHX_GET_AUTH_SESSION_KEY      0x0100
#define CEPHX_GET_PRINCIPAL_SESSION_KEY 0x0200
#define CEPHX_GET_ROTATING_KEY          0x0400

/* common bits */
struct ceph_x_ticket_blob {
	__u8 struct_v;
	__le64 secret_id;
	__le32 blob_len;
	char blob[];		/* blob_len bytes of opaque ticket */
} __attribute__ ((packed));


/* common request/reply headers */
struct ceph_x_request_header {
	__le16 op;
} __attribute__ ((packed));

struct ceph_x_reply_header {
	__le16 op;
	__le32 result;
} __attribute__ ((packed));


/* authenticate handshake */

/* initial hello (no reply header) */
struct ceph_x_server_challenge {
	__u8 struct_v;
	__le64 server_challenge;
} __attribute__ ((packed));

struct ceph_x_authenticate {
	__u8 struct_v;
	__le64 client_challenge;
	__le64 key;		/* proof of key: hash of encrypted challenges */
	/* ticket blob */
} __attribute__ ((packed));

struct ceph_x_service_ticket_request {
	__u8 struct_v;
	__le32 keys;		/* mask of CEPH_ENTITY_TYPE_* wanted */
} __attribute__ ((packed));

struct ceph_x_challenge_blob {
	__le64 server_challenge;
	__le64 client_challenge;
} __attribute__ ((packed));



/* authorize handshake */

/*
 * The authorizer consists of two pieces:
 *  a - service id, ticket blob
 *  b - encrypted with session key
 */
struct ceph_x_authorize_a {
	__u8 struct_v;
	__le64 global_id;
	__le32 service_id;
	struct ceph_x_ticket_blob ticket_blob;
} __attribute__ ((packed));

struct ceph_x_authorize_b {
	__u8 struct_v;
	__le64 nonce;
} __attribute__ ((packed));

struct ceph_x_authorize_reply {
	__u8 struct_v;
	__le64 nonce_plus_one;	/* proves the service decrypted our nonce */
} __attribute__ ((packed));


/*
 * encryption bundle: header prepended to every cephx plaintext
 */
#define CEPHX_ENC_MAGIC 0xff009cad8826aa55ull

struct ceph_x_encrypt_header {
	__u8 struct_v;
	__le64 magic;
} __attribute__ ((packed));

#endif
diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c
new file mode 100644
index 000000000000..b98086c7aeba
--- /dev/null
+++ b/fs/ceph/buffer.c
@@ -0,0 +1,78 @@
1
2#include "ceph_debug.h"
3#include "buffer.h"
4#include "decode.h"
5
6struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
7{
8 struct ceph_buffer *b;
9
10 b = kmalloc(sizeof(*b), gfp);
11 if (!b)
12 return NULL;
13
14 b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
15 if (b->vec.iov_base) {
16 b->is_vmalloc = false;
17 } else {
18 b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
19 if (!b->vec.iov_base) {
20 kfree(b);
21 return NULL;
22 }
23 b->is_vmalloc = true;
24 }
25
26 kref_init(&b->kref);
27 b->alloc_len = len;
28 b->vec.iov_len = len;
29 dout("buffer_new %p\n", b);
30 return b;
31}
32
33void ceph_buffer_release(struct kref *kref)
34{
35 struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
36
37 dout("buffer_release %p\n", b);
38 if (b->vec.iov_base) {
39 if (b->is_vmalloc)
40 vfree(b->vec.iov_base);
41 else
42 kfree(b->vec.iov_base);
43 }
44 kfree(b);
45}
46
47int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp)
48{
49 b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
50 if (b->vec.iov_base) {
51 b->is_vmalloc = false;
52 } else {
53 b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
54 b->is_vmalloc = true;
55 }
56 if (!b->vec.iov_base)
57 return -ENOMEM;
58 b->alloc_len = len;
59 b->vec.iov_len = len;
60 return 0;
61}
62
/*
 * Decode a u32-length-prefixed byte string at *p (bounded by end)
 * into a newly allocated ceph_buffer; on success the caller owns one
 * reference via *b.  Returns 0, -EINVAL on short input, or -ENOMEM.
 * Note: ceph_decode_need() jumps to the local "bad" label on a
 * bounds failure.
 */
int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
{
	size_t len;

	ceph_decode_need(p, end, sizeof(u32), bad);
	len = ceph_decode_32(p);
	dout("decode_buffer len %d\n", (int)len);
	ceph_decode_need(p, end, len, bad);
	*b = ceph_buffer_new(len, GFP_NOFS);
	if (!*b)
		return -ENOMEM;
	ceph_decode_copy(p, (*b)->vec.iov_base, len);
	return 0;
bad:
	return -EINVAL;
}
diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h
new file mode 100644
index 000000000000..58d19014068f
--- /dev/null
+++ b/fs/ceph/buffer.h
@@ -0,0 +1,39 @@
#ifndef __FS_CEPH_BUFFER_H
#define __FS_CEPH_BUFFER_H

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <linux/uio.h>

/*
 * a simple reference counted buffer.
 *
 * use kmalloc for small sizes (<= one page), vmalloc for larger
 * sizes.
 */
struct ceph_buffer {
	struct kref kref;	/* released via ceph_buffer_release() */
	struct kvec vec;	/* data pointer + in-use length */
	size_t alloc_len;	/* allocated size (may exceed vec.iov_len) */
	bool is_vmalloc;	/* which allocator freed it with */
};

extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
extern void ceph_buffer_release(struct kref *kref);

/* take an additional reference; returns @b for call chaining */
static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
{
	kref_get(&b->kref);
	return b;
}

/* drop a reference, freeing the buffer when the last one goes */
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
	kref_put(&b->kref, ceph_buffer_release);
}

extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);

#endif
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
new file mode 100644
index 000000000000..db122bb357b8
--- /dev/null
+++ b/fs/ceph/caps.c
@@ -0,0 +1,2927 @@
1#include "ceph_debug.h"
2
3#include <linux/fs.h>
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/vmalloc.h>
7#include <linux/wait.h>
8#include <linux/writeback.h>
9
10#include "super.h"
11#include "decode.h"
12#include "messenger.h"
13
14/*
15 * Capability management
16 *
17 * The Ceph metadata servers control client access to inode metadata
18 * and file data by issuing capabilities, granting clients permission
19 * to read and/or write both inode field and file data to OSDs
20 * (storage nodes). Each capability consists of a set of bits
21 * indicating which operations are allowed.
22 *
23 * If the client holds a *_SHARED cap, the client has a coherent value
24 * that can be safely read from the cached inode.
25 *
26 * In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the
27 * client is allowed to change inode attributes (e.g., file size,
28 * mtime), note its dirty state in the ceph_cap, and asynchronously
29 * flush that metadata change to the MDS.
30 *
31 * In the event of a conflicting operation (perhaps by another
32 * client), the MDS will revoke the conflicting client capabilities.
33 *
34 * In order for a client to cache an inode, it must hold a capability
35 * with at least one MDS server. When inodes are released, release
36 * notifications are batched and periodically sent en masse to the MDS
37 * cluster to release server state.
38 */
39
40
41/*
42 * Generate readable cap strings for debugging output.
43 */
44#define MAX_CAP_STR 20
45static char cap_str[MAX_CAP_STR][40];
46static DEFINE_SPINLOCK(cap_str_lock);
47static int last_cap_str;
48
49static char *gcap_string(char *s, int c)
50{
51 if (c & CEPH_CAP_GSHARED)
52 *s++ = 's';
53 if (c & CEPH_CAP_GEXCL)
54 *s++ = 'x';
55 if (c & CEPH_CAP_GCACHE)
56 *s++ = 'c';
57 if (c & CEPH_CAP_GRD)
58 *s++ = 'r';
59 if (c & CEPH_CAP_GWR)
60 *s++ = 'w';
61 if (c & CEPH_CAP_GBUFFER)
62 *s++ = 'b';
63 if (c & CEPH_CAP_GLAZYIO)
64 *s++ = 'l';
65 return s;
66}
67
/*
 * Render @caps as a human-readable string for debug output, e.g.
 * "pAsLsXsFscr".  Results live in a small rotating pool of static
 * buffers; only the index allocation is locked, so a string may be
 * overwritten once MAX_CAP_STR later calls have occurred.  Debug use
 * only.
 */
const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	/* grab the next slot in the rotating buffer pool */
	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	/* file caps occupy the top bits, so no mask is applied here */
	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	/* no bits at all: emit "-" rather than an empty string */
	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
114
115/*
116 * Cap reservations
117 *
118 * Maintain a global pool of preallocated struct ceph_caps, referenced
 * by struct ceph_cap_reservation contexts.  This ensures that we preallocate
120 * memory needed to successfully process an MDS response. (If an MDS
121 * sends us cap information and we fail to process it, we will have
122 * problems due to the client and MDS being out of sync.)
123 *
124 * Reservations are 'owned' by a ceph_cap_reservation context.
125 */
126static spinlock_t caps_list_lock;
127static struct list_head caps_list; /* unused (reserved or unreserved) */
128static int caps_total_count; /* total caps allocated */
129static int caps_use_count; /* in use */
130static int caps_reserve_count; /* unused, reserved */
131static int caps_avail_count; /* unused, unreserved */
132static int caps_min_count; /* keep at least this many (unreserved) */
133
/* Initialize the global preallocated-cap pool (list + lock) at boot. */
void __init ceph_caps_init(void)
{
	INIT_LIST_HEAD(&caps_list);
	spin_lock_init(&caps_list_lock);
}
139
140void ceph_caps_finalize(void)
141{
142 struct ceph_cap *cap;
143
144 spin_lock(&caps_list_lock);
145 while (!list_empty(&caps_list)) {
146 cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
147 list_del(&cap->caps_item);
148 kmem_cache_free(ceph_cap_cachep, cap);
149 }
150 caps_total_count = 0;
151 caps_avail_count = 0;
152 caps_use_count = 0;
153 caps_reserve_count = 0;
154 caps_min_count = 0;
155 spin_unlock(&caps_list_lock);
156}
157
/*
 * Adjust the minimum number of unreserved caps to keep preallocated
 * (may be negative to lower it); must never drive the floor below 0.
 */
void ceph_adjust_min_caps(int delta)
{
	spin_lock(&caps_list_lock);
	caps_min_count += delta;
	BUG_ON(caps_min_count < 0);
	spin_unlock(&caps_list_lock);
}
165
166int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
167{
168 int i;
169 struct ceph_cap *cap;
170 int have;
171 int alloc = 0;
172 LIST_HEAD(newcaps);
173 int ret = 0;
174
175 dout("reserve caps ctx=%p need=%d\n", ctx, need);
176
177 /* first reserve any caps that are already allocated */
178 spin_lock(&caps_list_lock);
179 if (caps_avail_count >= need)
180 have = need;
181 else
182 have = caps_avail_count;
183 caps_avail_count -= have;
184 caps_reserve_count += have;
185 BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
186 caps_avail_count);
187 spin_unlock(&caps_list_lock);
188
189 for (i = have; i < need; i++) {
190 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
191 if (!cap) {
192 ret = -ENOMEM;
193 goto out_alloc_count;
194 }
195 list_add(&cap->caps_item, &newcaps);
196 alloc++;
197 }
198 BUG_ON(have + alloc != need);
199
200 spin_lock(&caps_list_lock);
201 caps_total_count += alloc;
202 caps_reserve_count += alloc;
203 list_splice(&newcaps, &caps_list);
204
205 BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
206 caps_avail_count);
207 spin_unlock(&caps_list_lock);
208
209 ctx->count = need;
210 dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
211 ctx, caps_total_count, caps_use_count, caps_reserve_count,
212 caps_avail_count);
213 return 0;
214
215out_alloc_count:
216 /* we didn't manage to reserve as much as we needed */
217 pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
218 ctx, need, have);
219 return ret;
220}
221
/*
 * Release a reservation made by ceph_reserve_caps: move ctx->count
 * caps from the reserved count back to the available count (the caps
 * themselves stay cached on caps_list).  Always returns 0.
 */
int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&caps_list_lock);
		BUG_ON(caps_reserve_count < ctx->count);
		caps_reserve_count -= ctx->count;
		caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     caps_total_count, caps_use_count, caps_reserve_count,
		     caps_avail_count);
		BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
		       caps_avail_count);
		spin_unlock(&caps_list_lock);
	}
	return 0;
}
240
/*
 * Take one preallocated cap from the reservation context @ctx, or
 * allocate one directly (which may return NULL) when no context is
 * given.  Accounting moves the cap from "reserved" to "used".
 */
static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx)
		return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);

	spin_lock(&caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	/* a reservation guarantees these; violation is a logic bug */
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > caps_reserve_count);
	BUG_ON(list_empty(&caps_list));

	ctx->count--;
	caps_reserve_count--;
	caps_use_count++;

	cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
	return cap;
}
269
/*
 * Return a cap to the pool: either free it outright, or cache it on
 * caps_list if the pool is below its reserve + minimum target.
 */
void ceph_put_cap(struct ceph_cap *cap)
{
	spin_lock(&caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
		caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		caps_avail_count++;
		list_add(&cap->caps_item, &caps_list);
	}

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
}
293
/*
 * Report the current pool counters through any non-NULL out pointers.
 * Reads are unlocked, so the values are a best-effort snapshot (fine
 * for status reporting).  @client is currently unused.
 */
void ceph_reservation_status(struct ceph_client *client,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	if (total)
		*total = caps_total_count;
	if (avail)
		*avail = caps_avail_count;
	if (used)
		*used = caps_use_count;
	if (reserved)
		*reserved = caps_reserve_count;
	if (min)
		*min = caps_min_count;
}
309
310/*
311 * Find ceph_cap for given mds, if any.
312 *
313 * Called with i_lock held.
314 */
315static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
316{
317 struct ceph_cap *cap;
318 struct rb_node *n = ci->i_caps.rb_node;
319
320 while (n) {
321 cap = rb_entry(n, struct ceph_cap, ci_node);
322 if (mds < cap->mds)
323 n = n->rb_left;
324 else if (mds > cap->mds)
325 n = n->rb_right;
326 else
327 return cap;
328 }
329 return NULL;
330}
331
332/*
333 * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
334 * -1.
335 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|WRBUFFER|EXCL caps */
	/* note: mds/mseq track the last cap visited; the early break
	 * keeps the first cap with writeish bits.  If the inode has no
	 * caps at all, -1 is returned and *mseq is left untouched. */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (mseq)
			*mseq = cap->mseq;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}
355
/* Locked wrapper around __ceph_get_cap_mds; returns an mds id or -1. */
int ceph_get_cap_mds(struct inode *inode)
{
	int mds;
	spin_lock(&inode->i_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
	spin_unlock(&inode->i_lock);
	return mds;
}
364
365/*
366 * Called under i_lock.
367 */
/*
 * Insert @new into ci->i_caps, which is keyed by mds id.  A duplicate
 * mds entry is a caller bug (callers must look up first) and BUGs.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	/* descend to the leaf position where new->mds belongs */
	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
389
390/*
391 * (re)set cap hold timeouts, which control the delayed release
392 * of unused caps back to the MDS. Should be called on cap use.
393 */
/*
 * Refresh the min/max hold deadlines from the mount's configured
 * caps_wanted_delay_{min,max} (seconds), rounded to whole jiffies.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_args *ma = mdsc->client->mount_args;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
406
407/*
408 * (Re)queue cap at the end of the delayed cap release list.
409 *
410 * If I_FLUSH is set, leave the inode at the front of the list.
411 *
412 * Caller holds i_lock
413 * -> we take mdsc->cap_delay_lock
414 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
			        struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	/* nothing to queue once the mds client is shutting down */
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			/* an I_FLUSH inode keeps its (front) position */
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}
433
434/*
435 * Queue an inode for immediate writeback. Mark inode with I_FLUSH,
436 * indicating we should send a cap message to flush dirty metadata
437 * asap, and move to the front of the delayed cap list.
438 */
439static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
440 struct ceph_inode_info *ci)
441{
442 dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
443 spin_lock(&mdsc->cap_delay_lock);
444 ci->i_ceph_flags |= CEPH_I_FLUSH;
445 if (!list_empty(&ci->i_cap_delay_list))
446 list_del_init(&ci->i_cap_delay_list);
447 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
448 spin_unlock(&mdsc->cap_delay_lock);
449}
450
451/*
452 * Cancel delayed work on cap.
453 *
454 * Caller must hold i_lock.
455 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	/* empty-check precedes taking cap_delay_lock — presumably safe
	 * because list membership changes only under i_lock, which the
	 * caller holds; TODO confirm */
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
466
467/*
468 * Common issue checks for add_cap, handle_cap_grant.
469 */
/*
 * React to newly-issued cap bits: bump generation counters so stale
 * cached state (page cache, dir contents) is noticed.  @cap is
 * currently unused here.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & CEPH_CAP_FILE_CACHE) &&
	    (had & CEPH_CAP_FILE_CACHE) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
		}
	}
}
497
498/*
499 * Add a capability under the given MDS session.
500 *
501 * Caller should hold session snap_rwsem (read) and s_mutex.
502 *
503 * @fmode is the open file mode, if we are opening a file, otherwise
504 * it is < 0. (This is so we can atomically add the cap and add an
505 * open file reference to it.)
506 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	/*
	 * Allocation happens with i_lock dropped, so after allocating
	 * we retry the lookup in case a cap for this mds appeared (or
	 * an earlier allocation is now usable) in the meantime.
	 */
retry:
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			cap = new_cap;
			new_cap = NULL;
		} else {
			spin_unlock(&inode->i_lock);
			new_cap = get_cap(caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info? (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	/* track (or clear) which cap is the authoritative one */
	if (flags & CEPH_CAP_FLAG_AUTH)
		ci->i_auth_cap = cap;
	else if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	/* merge the new grant into the (possibly pre-existing) cap */
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&inode->i_lock);
	wake_up(&ci->i_cap_wq);
	return 0;
}
626
627/*
628 * Return true if cap has not timed out and belongs to the current
629 * generation of the MDS session (i.e. has not gone 'stale' due to
630 * us losing touch with the mds).
631 */
632static int __cap_is_valid(struct ceph_cap *cap)
633{
634 unsigned long ttl;
635 u32 gen;
636
637 spin_lock(&cap->session->s_cap_lock);
638 gen = cap->session->s_cap_gen;
639 ttl = cap->session->s_cap_ttl;
640 spin_unlock(&cap->session->s_cap_lock);
641
642 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
643 dout("__cap_is_valid %p cap %p issued %s "
644 "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
645 cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
646 return 0;
647 }
648
649 return 1;
650}
651
652/*
653 * Return set of valid cap bits issued to us. Note that caps time
654 * out, and may be invalidated in bulk if the client session times out
655 * and session->s_cap_gen is bumped.
656 */
/*
 * Union of all currently valid issued cap bits, seeded with snap caps
 * and any bits retained across an in-progress export.  If
 * @implemented is non-NULL it receives the union of implemented bits.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		/* stale caps contribute nothing */
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}
677
678/*
679 * Get cap bits issued by caps other than @ocap
680 */
681int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
682{
683 int have = ci->i_snap_caps;
684 struct ceph_cap *cap;
685 struct rb_node *p;
686
687 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
688 cap = rb_entry(p, struct ceph_cap, ci_node);
689 if (cap == ocap)
690 continue;
691 if (!__cap_is_valid(cap))
692 continue;
693 have |= cap->issued;
694 }
695 return have;
696}
697
698/*
699 * Move a cap to the end of the LRU (oldest caps at list head, newest
700 * at list tail).
701 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	/* skip the move while someone is iterating s_caps — moving the
	 * entry would disturb their traversal */
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}
717
718/*
719 * Check if we hold the given mask. If so, move the cap(s) to the
720 * front of their respective LRUs. (This is the preferred way for
721 * callers to check for caps they want.)
722 */
/*
 * Return 1 if all bits in @mask are covered by our valid caps (snap
 * caps alone, a single cap, or a combination), else 0.  When @touch
 * is set, every cap that contributed is moved to the LRU tail.
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	/* snap caps alone may already satisfy the mask */
	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
778
779/*
780 * Return true if mask caps are currently being revoked by an MDS.
781 */
782int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
783{
784 struct inode *inode = &ci->vfs_inode;
785 struct ceph_cap *cap;
786 struct rb_node *p;
787 int ret = 0;
788
789 spin_lock(&inode->i_lock);
790 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
791 cap = rb_entry(p, struct ceph_cap, ci_node);
792 if (__cap_is_valid(cap) &&
793 (cap->implemented & ~cap->issued & mask)) {
794 ret = 1;
795 break;
796 }
797 }
798 spin_unlock(&inode->i_lock);
799 dout("ceph_caps_revoking %p %s = %d\n", inode,
800 ceph_cap_string(mask), ret);
801 return ret;
802}
803
/*
 * Cap bits currently in active use, derived from the inode's
 * reference counters.  Note FILE_CACHE counts as used while
 * i_rdcache_gen is nonzero, not only while a read reference is held.
 */
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->i_rdcache_gen)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}
819
820/*
821 * wanted, by virtue of open file modes
822 */
823int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
824{
825 int want = 0;
826 int mode;
827 for (mode = 0; mode < 4; mode++)
828 if (ci->i_nr_by_mode[mode])
829 want |= ceph_caps_for_mode(mode);
830 return want;
831}
832
833/*
834 * Return caps we have registered with the MDS(s) as 'wanted'.
835 */
836int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
837{
838 struct ceph_cap *cap;
839 struct rb_node *p;
840 int mds_wanted = 0;
841
842 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
843 cap = rb_entry(p, struct ceph_cap, ci_node);
844 if (!__cap_is_valid(cap))
845 continue;
846 mds_wanted |= cap->mds_wanted;
847 }
848 return mds_wanted;
849}
850
851/*
852 * called under i_lock
853 */
/* True if any cap exists, or one is mid-export to another mds. */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}
858
859/*
860 * caller should hold i_lock.
861 * caller will not hold session s_mutex if called from destroy_inode.
862 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	cap->ci = NULL;
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
	}
	spin_unlock(&session->s_cap_lock);

	/* only free now if fully detached; otherwise the session
	 * iterator still references the cap and finishes the removal */
	if (cap->session == NULL)
		ceph_put_cap(cap);

	/* last cap gone: drop our membership in the snap realm */
	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
905
906/*
907 * Build and send a cap message to the given MDS.
908 *
909 * Caller should be holding s_mutex.
910 */
/*
 * Marshal one CEPH_MSG_CLIENT_CAPS message (all multi-byte fields in
 * little-endian wire order) and queue it on the session connection.
 * An xattr blob, if given, rides in the message "middle" with its own
 * reference taken.  Returns 0 or a negative errno from ceph_msg_new.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			uid_t uid, gid_t gid, mode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* the flush tid doubles as the message transaction id */
	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(uid);
	fc->gid = cpu_to_le32(gid);
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}
976
977/*
978 * Queue cap releases when an inode is dropped from our cache. Since
979 * inode is about to be destroyed, there is no need for i_lock.
980 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	/* walk every remaining cap, appending a release record to the
	 * owning session's pending release message, then drop the cap */
	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;
		struct ceph_msg *msg;
		struct ceph_mds_cap_release *head;
		struct ceph_mds_cap_item *item;

		spin_lock(&session->s_cap_lock);
		BUG_ON(!session->s_num_cap_releases);
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);

		dout(" adding %p release to mds%d msg %p (%d left)\n",
		     inode, session->s_mds, msg, session->s_num_cap_releases);

		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
		/* append the new item at the current end of the payload */
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(ceph_ino(inode));
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);

		session->s_num_cap_releases--;

		msg->front.iov_len += sizeof(*item);
		/* a full message moves to the ready-to-send list */
		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			dout(" release msg %p full\n", msg);
			list_move_tail(&msg->list_head,
				      &session->s_cap_releases_done);
		} else {
			dout(" release msg %p at %d/%d (%d)\n", msg,
			     (int)le32_to_cpu(head->num),
			     (int)CEPH_CAPS_PER_RELEASE,
			     (int)msg->front.iov_len);
		}
		spin_unlock(&session->s_cap_lock);
		/* advance before __ceph_remove_cap erases cap's rb node */
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}
1029
1030/*
1031 * Send a cap msg on the given inode. Update our caps state, then
1032 * drop i_lock and send the message.
1033 *
1034 * Make note of max_size reported/requested from mds, revoked caps
1035 * that have now been implemented.
1036 *
 * Make a half-hearted attempt to invalidate page cache if we are
1038 * dropping RDCACHE. Note that this will leave behind locked pages
1039 * that we'll then need to deal with elsewhere.
1040 *
1041 * Return non-zero if delayed release, or we experienced an error
1042 * such that the caller should requeue + retry later.
1043 *
1044 * called with i_lock, then drops it.
1045 * caller should hold snap_rwsem (read), s_mutex.
1046 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->vfs_inode->i_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	mode_t mode;
	uid_t uid;
	gid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	/* never retain bits the mds is in the middle of revoking */
	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;
	}

	/* snapshot everything the message needs while i_lock is held */
	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	follows = ci->i_snap_realm->cached_context->seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (dropping & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_version = ci->i_xattrs.version + 1;
	}

	/* i_lock is dropped here, as the function contract promises */
	spin_unlock(&inode->i_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode,
		xattr_version,
		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
		follows);
	if (ret < 0) {
		/* nonzero return tells the caller to requeue + retry */
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up(&ci->i_cap_wq);

	return delayed;
}
1167
1168/*
1169 * When a snapshot is taken, clients accumulate dirty metadata on
1170 * inodes with capabilities in ceph_cap_snaps to describe the file
1171 * state at the time the snapshot was taken. This must be flushed
1172 * asynchronously back to the MDS once sync writes complete and dirty
1173 * data is written out.
1174 *
1175 * Called under i_lock. Takes s_mutex as needed.
1176 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through the
				  i_cap_snaps list, and skip these entries next time
				  around to avoid an infinite loop */

	/* caller may hand us a session (and its s_mutex) to reuse */
	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			continue;

		/* pick mds, take s_mutex */
		mds = __ceph_get_cap_mds(ci, &mseq);
		if (session && session->s_mds != mds) {
			/* auth mds changed since we last looked; drop the
			 * stale session before looking up the right one */
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			/* lock ordering is s_mutex, then i_lock; drop i_lock
			 * so we can take the session mutex, then rescan */
			spin_unlock(&inode->i_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion.  retry, and we'll get a better
			 * @mds value next time.
			 */
			spin_lock(&inode->i_lock);
			goto retry;
		}

		/* tag the capsnap with a flush tid and hang it off the
		 * session so a recovering MDS can be re-sent this flush */
		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&inode->i_lock);

		dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
		     inode, capsnap, next_follows, capsnap->size);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     0, NULL,
			     capsnap->follows);

		/* skip everything up to and including this capsnap on the
		 * next pass; drop the ref we took above */
		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		/* i_lock was dropped for the send; retake it and rescan */
		spin_lock(&inode->i_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

	/* hand the session (and its held s_mutex) back to the caller if
	 * they asked for it; otherwise clean up ourselves */
	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}
1273
1274static void ceph_flush_snaps(struct ceph_inode_info *ci)
1275{
1276 struct inode *inode = &ci->vfs_inode;
1277
1278 spin_lock(&inode->i_lock);
1279 __ceph_flush_snaps(ci, NULL);
1280 spin_unlock(&inode->i_lock);
1281}
1282
1283/*
1284 * Mark caps dirty. If inode is newly dirty, add to the global dirty
1285 * list.
1286 */
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;	/* cap bits that were already dirty */
	int dirty = 0;			/* VFS dirty flags to apply below */

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		/* clean -> dirty transition: add to global dirty list */
		dout(" inode %p now dirty\n", &ci->vfs_inode);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			/* pin the inode while it has dirty, unflushed caps;
			 * NOTE(review): matching iput presumably happens once
			 * the flush is acked -- confirm in the ack path */
			igrab(inode);
			dirty |= I_DIRTY_SYNC;
		}
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	/* buffered file data newly dirtied on top of existing buffered
	 * state also needs a datasync-level mark */
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	/* make sure the delayed-cap worker revisits this inode */
	__cap_delay_requeue(mdsc, ci);
}
1317
1318/*
1319 * Add dirty inode to the flushing list. Assigned a seq number so we
1320 * can wait for caps to flush without starving.
1321 *
1322 * Called under i_lock.
1323 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;	/* cap bits moving from dirty to flushing (returned) */

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	/* everything dirty becomes "flushing"; i_dirty_caps is cleared */
	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	/* assign a flush seq so waiters can be served without starving */
	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		/* already on a flushing list; re-queue at the tail so the
		 * list stays ordered by i_cap_flush_seq */
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}
1361
1362/*
1363 * try to invalidate mapping pages without blocking.
1364 */
/*
 * Cheap emptiness probe: look for a page at index 0 only.  Returns 1
 * if none exists (mapping treated as empty), 0 otherwise.
 */
static int mapping_is_empty(struct address_space *mapping)
{
	struct page *first = find_get_page(mapping, 0);

	if (first) {
		put_page(first);
		return 0;
	}
	return 1;
}
1375
static int try_nonblocking_invalidate(struct inode *inode)
{
	/* called with i_lock held; temporarily drops it around the
	 * (potentially blocking) invalidate call */
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	/* i_rdcache_gen lets us detect pages cached while i_lock was
	 * dropped: if it changed, our invalidate result is stale */
	spin_unlock(&inode->i_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&inode->i_lock);

	if (mapping_is_empty(&inode->i_data) &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
1396
1397/*
1398 * Swiss army knife function to examine currently used and wanted
1399 * versus held caps. Release, flush, ack revoked caps to mds as
1400 * appropriate.
1401 *
1402 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1403 * cap release further.
1404 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
1405 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1406 * further delay.
1407 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int drop_session_lock = session ? 0 : 1;
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&inode->i_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session);
	goto retry_locked;
retry:
	spin_lock(&inode->i_lock);
retry_locked:
	/* recompute wanted/used/issued each pass; they may change while
	 * i_lock is dropped */
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto old our caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
	    ci->i_rdcache_gen &&                     /* may have cached pages */
	    (file_wanted == 0 ||                     /* no open files */
	     (revoking & CEPH_CAP_FILE_CACHE)) &&    /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & CEPH_CAP_FILE_CACHE) {
				/* must ack the revoke; punt the (blocking)
				 * invalidate to the workqueue */
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		/* only one invalidation attempt per call, to avoid looping */
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		revoking = cap->implemented & ~cap->issued;
		if (revoking)
			dout(" mds%d revoking %s\n", cap->mds,
			     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		/* take this cap's session mutex (lock ordering: s_mutex,
		 * snap_rwsem, then i_lock; invert via trylock + retry) */
		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&inode->i_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&inode->i_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
				      retain, flushing, NULL);
		goto retry; /* retake i_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&inode->i_lock);

	/* run page invalidation outside of i_lock */
	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	/* only drop s_mutex if we took it ourselves (session was NULL
	 * on entry) */
	if (session && drop_session_lock)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
1647
1648/*
1649 * Try to flush dirty caps back to the auth mds.
1650 */
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
			  unsigned *flush_tid)
{
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int unlock_session = session ? 0 : 1;	/* we took s_mutex ourselves */
	int flushing = 0;	/* cap bits submitted for flush (returned) */

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session) {
			/* need the auth cap's session mutex; lock ordering
			 * is s_mutex before i_lock, so drop and retry */
			spin_unlock(&inode->i_lock);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		BUG_ON(session != cap->session);
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
			goto out;

		flushing = __mark_caps_flushing(inode, session);

		/* __send_cap drops i_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     cap->issued | cap->implemented, flushing,
				     flush_tid);
		if (!delayed)
			goto out_unlocked;

		/* send was delayed; requeue so it is retried later */
		spin_lock(&inode->i_lock);
		__cap_delay_requeue(mdsc, ci);
	}
out:
	spin_unlock(&inode->i_lock);
out_unlocked:
	if (session && unlock_session)
		mutex_unlock(&session->s_mutex);
	return flushing;
}
1700
1701/*
1702 * Return true if we've flushed caps through the given flush_tid.
1703 */
1704static int caps_are_flushed(struct inode *inode, unsigned tid)
1705{
1706 struct ceph_inode_info *ci = ceph_inode(inode);
1707 int dirty, i, ret = 1;
1708
1709 spin_lock(&inode->i_lock);
1710 dirty = __ceph_caps_dirty(ci);
1711 for (i = 0; i < CEPH_CAP_BITS; i++)
1712 if ((ci->i_flushing_caps & (1 << i)) &&
1713 ci->i_cap_flush_tid[i] <= tid) {
1714 /* still flushing this bit */
1715 ret = 0;
1716 break;
1717 }
1718 spin_unlock(&inode->i_lock);
1719 return ret;
1720}
1721
1722/*
1723 * Wait on any unsafe replies for the given inode. First wait on the
1724 * newest request, and make that the upper bound. Then, if there are
1725 * more requests, keep waiting on the oldest as long as it is still older
1726 * than the original request.
1727 */
static void sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */
	req = list_entry(head->prev, struct ceph_osd_request,
			 r_unsafe_item);
	last_tid = req->r_tid;

	do {
		/* hold a ref on the request across the unlocked wait so it
		 * can't be freed under us */
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		spin_lock(&ci->i_unsafe_lock);
		ceph_osdc_put_request(req);

		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_entry(head->next, struct ceph_osd_request,
				 r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}
1765
1766int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
1767{
1768 struct inode *inode = dentry->d_inode;
1769 struct ceph_inode_info *ci = ceph_inode(inode);
1770 unsigned flush_tid;
1771 int ret;
1772 int dirty;
1773
1774 dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1775 sync_write_wait(inode);
1776
1777 ret = filemap_write_and_wait(inode->i_mapping);
1778 if (ret < 0)
1779 return ret;
1780
1781 dirty = try_flush_caps(inode, NULL, &flush_tid);
1782 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1783
1784 /*
1785 * only wait on non-file metadata writeback (the mds
1786 * can recover size and mtime, so we don't need to
1787 * wait for that)
1788 */
1789 if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1790 dout("fsync waiting for flush_tid %u\n", flush_tid);
1791 ret = wait_event_interruptible(ci->i_cap_wq,
1792 caps_are_flushed(inode, flush_tid));
1793 }
1794
1795 dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
1796 return ret;
1797}
1798
1799/*
1800 * Flush any dirty caps back to the mds. If we aren't asked to wait,
1801 * queue inode for flush but don't do so immediately, because we can
1802 * get by with fewer MDS messages if we wait for data writeback to
1803 * complete first.
1804 */
1805int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1806{
1807 struct ceph_inode_info *ci = ceph_inode(inode);
1808 unsigned flush_tid;
1809 int err = 0;
1810 int dirty;
1811 int wait = wbc->sync_mode == WB_SYNC_ALL;
1812
1813 dout("write_inode %p wait=%d\n", inode, wait);
1814 if (wait) {
1815 dirty = try_flush_caps(inode, NULL, &flush_tid);
1816 if (dirty)
1817 err = wait_event_interruptible(ci->i_cap_wq,
1818 caps_are_flushed(inode, flush_tid));
1819 } else {
1820 struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
1821
1822 spin_lock(&inode->i_lock);
1823 if (__ceph_caps_dirty(ci))
1824 __cap_delay_requeue_front(mdsc, ci);
1825 spin_unlock(&inode->i_lock);
1826 }
1827 return err;
1828}
1829
1830/*
1831 * After a recovering MDS goes active, we need to resend any caps
1832 * we were flushing.
1833 *
1834 * Caller holds session->s_mutex.
1835 */
1836static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1837 struct ceph_mds_session *session)
1838{
1839 struct ceph_cap_snap *capsnap;
1840
1841 dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1842 list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1843 flushing_item) {
1844 struct ceph_inode_info *ci = capsnap->ci;
1845 struct inode *inode = &ci->vfs_inode;
1846 struct ceph_cap *cap;
1847
1848 spin_lock(&inode->i_lock);
1849 cap = ci->i_auth_cap;
1850 if (cap && cap->session == session) {
1851 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1852 cap, capsnap);
1853 __ceph_flush_snaps(ci, &session);
1854 } else {
1855 pr_err("%p auth cap %p not mds%d ???\n", inode,
1856 cap, session->s_mds);
1857 spin_unlock(&inode->i_lock);
1858 }
1859 }
1860}
1861
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;

	/* re-send pending FLUSHSNAP messages first */
	kick_flushing_capsnaps(mdsc, session);

	dout("kick_flushing_caps mds%d\n", session->s_mds);
	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;
		int delayed = 0;

		spin_lock(&inode->i_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p %s\n", inode,
			     cap, ceph_cap_string(ci->i_flushing_caps));
			/* __send_cap drops i_lock for us */
			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					     __ceph_caps_used(ci),
					     __ceph_caps_wanted(ci),
					     cap->issued | cap->implemented,
					     ci->i_flushing_caps, NULL);
			if (delayed) {
				/* send was deferred; requeue for a later
				 * attempt by the delayed-cap worker */
				spin_lock(&inode->i_lock);
				__cap_delay_requeue(mdsc, ci);
				spin_unlock(&inode->i_lock);
			}
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
			spin_unlock(&inode->i_lock);
		}
	}
}
1897
1898
1899/*
1900 * Take references to capabilities we hold, so that we don't release
1901 * them to the MDS prematurely.
1902 *
1903 * Protected by i_lock.
1904 */
1905static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1906{
1907 if (got & CEPH_CAP_PIN)
1908 ci->i_pin_ref++;
1909 if (got & CEPH_CAP_FILE_RD)
1910 ci->i_rd_ref++;
1911 if (got & CEPH_CAP_FILE_CACHE)
1912 ci->i_rdcache_ref++;
1913 if (got & CEPH_CAP_FILE_WR)
1914 ci->i_wr_ref++;
1915 if (got & CEPH_CAP_FILE_BUFFER) {
1916 if (ci->i_wrbuffer_ref == 0)
1917 igrab(&ci->vfs_inode);
1918 ci->i_wrbuffer_ref++;
1919 dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
1920 &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
1921 }
1922}
1923
1924/*
1925 * Try to grab cap references. Specify those refs we @want, and the
1926 * minimal set we @need. Also include the larger offset we are writing
1927 * to (when applicable), and check against max_size here as well.
1928 * Note that caller is responsible for ensuring max_size increases are
1929 * requested from the MDS.
1930 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    int *got, loff_t endoff, int *check_max, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	int ret = 0;	/* nonzero means "stop waiting" (success OR *err set) */
	int have, implemented;
	int file_wanted;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));
	spin_lock(&inode->i_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) == 0) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out;
	}

	if (need & CEPH_CAP_FILE_WR) {
		/* writing past max_size requires asking the MDS for a
		 * larger max_size first; signal that via *check_max */
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_wanted_max_size) {
				*check_max = 1;
				ret = 1;
			}
			goto out;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out;
		}
	}
	have = __ceph_caps_issued(ci, &implemented);

	/*
	 * disallow writes while a truncate is pending
	 */
	if (ci->i_truncate_pending)
		have &= ~CEPH_CAP_FILE_WR;

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			/* success: take refs on need plus whatever wanted
			 * caps we happen to hold */
			*got = need | (have & want);
			__take_cap_refs(ci, *got);
			ret = 1;
		}
	} else {
		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out:
	spin_unlock(&inode->i_lock);
	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
2007
2008/*
2009 * Check the offset we are writing up to against our current
2010 * max_size. If necessary, tell the MDS we want to write to
2011 * a larger offset.
2012 */
2013static void check_max_size(struct inode *inode, loff_t endoff)
2014{
2015 struct ceph_inode_info *ci = ceph_inode(inode);
2016 int check = 0;
2017
2018 /* do we need to explicitly request a larger max_size? */
2019 spin_lock(&inode->i_lock);
2020 if ((endoff >= ci->i_max_size ||
2021 endoff > (inode->i_size << 1)) &&
2022 endoff > ci->i_wanted_max_size) {
2023 dout("write %p at large endoff %llu, req max_size\n",
2024 inode, endoff);
2025 ci->i_wanted_max_size = endoff;
2026 check = 1;
2027 }
2028 spin_unlock(&inode->i_lock);
2029 if (check)
2030 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2031}
2032
2033/*
2034 * Wait for caps, and take cap references. If we can't get a WR cap
2035 * due to a small max_size, make sure we check_max_size (and possibly
2036 * ask the mds) so we don't get hung up indefinitely.
2037 */
2038int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2039 loff_t endoff)
2040{
2041 int check_max, ret, err;
2042
2043retry:
2044 if (endoff > 0)
2045 check_max_size(&ci->vfs_inode, endoff);
2046 check_max = 0;
2047 err = 0;
2048 ret = wait_event_interruptible(ci->i_cap_wq,
2049 try_get_cap_refs(ci, need, want,
2050 got, endoff,
2051 &check_max, &err));
2052 if (err)
2053 ret = err;
2054 if (check_max)
2055 goto retry;
2056 return ret;
2057}
2058
2059/*
2060 * Take cap refs. Caller must already know we hold at least one ref
2061 * on the caps in question or we don't know this is safe.
2062 */
2063void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2064{
2065 spin_lock(&ci->vfs_inode.i_lock);
2066 __take_cap_refs(ci, caps);
2067 spin_unlock(&ci->vfs_inode.i_lock);
2068}
2069
2070/*
2071 * Release cap refs.
2072 *
2073 * If we released the last ref on any given cap, call ceph_check_caps
2074 * to release (or schedule a release).
2075 *
2076 * If we are releasing a WR cap (from a sync write), finalize any affected
2077 * cap_snap, and wake up any waiters.
2078 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;
	struct ceph_cap_snap *capsnap;

	spin_lock(&inode->i_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wrbuffer_ref == 0) {
			/* last buffered ref: also drop the inode pin taken
			 * by __take_cap_refs (iput below, outside i_lock) */
			last++;
			put++;
		}
		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			/* a completed sync write may finalize the oldest
			 * pending cap_snap, which then needs flushing */
			if (!list_empty(&ci->i_cap_snaps)) {
				capsnap = list_first_entry(&ci->i_cap_snaps,
						     struct ceph_cap_snap,
						     ci_item);
				if (capsnap->writing) {
					capsnap->writing = 0;
					flushsnaps =
						__ceph_finish_cap_snap(ci,
								       capsnap);
					wake = 1;
				}
			}
		}
	spin_unlock(&inode->i_lock);

	dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
	     last ? "last" : "");

	/* all follow-up work happens outside of i_lock */
	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up(&ci->i_cap_wq);
	if (put)
		iput(inode);
}
2132
2133/*
2134 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2135 * context. Adjust per-snap dirty page accounting as appropriate.
2136 * Once all dirty data for a cap_snap is flushed, flush snapped file
2137 * metadata back to the MDS. If we dropped the last ref, call
2138 * ceph_check_caps.
2139 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;		/* dropped the last wrbuffer ref overall */
	int last_snap = 0;	/* dropped the last dirty page of a cap_snap */
	int found = 0;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&inode->i_lock);
	ci->i_wrbuffer_ref -= nr;
	last = !ci->i_wrbuffer_ref;

	if (ci->i_head_snapc == snapc) {
		/* pages belonged to the current (head) snap context */
		ci->i_wrbuffer_ref_head -= nr;
		if (!ci->i_wrbuffer_ref_head) {
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		/* pages belonged to an older snap; find its cap_snap and
		 * adjust that snapshot's dirty-page accounting */
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = 1;
				capsnap->dirty_pages -= nr;
				last_snap = !capsnap->dirty_pages;
				break;
			}
		}
		BUG_ON(!found);
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     last_snap ? " (capsnap last)" : "");
	}

	spin_unlock(&inode->i_lock);

	if (last) {
		/* last wrbuffer ref: update the MDS and drop the inode pin
		 * taken when the first ref was acquired */
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		iput(inode);
	} else if (last_snap) {
		/* cap_snap fully written out; flush its metadata */
		ceph_flush_snaps(ci);
		wake_up(&ci->i_cap_wq);
	}
}
2193
2194/*
2195 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2196 * actually be a revocation if it specifies a smaller cap set.)
2197 *
2198 * caller holds s_mutex.
2199 * return value:
2200 * 0 - ok
2201 * 1 - check_caps on auth cap only (writeback)
2202 * 2 - check_caps (ack revoke)
2203 */
static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
			    struct ceph_mds_session *session,
			    struct ceph_cap *cap,
			    struct ceph_buffer *xattr_buf)
	__releases(inode->i_lock)

{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int issued, implemented, used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int reply = 0;		/* return value; see function comment */
	int wake = 0;		/* wake i_cap_wq waiters on exit */
	int writeback = 0;	/* queue async writeback on exit */
	int revoked_rdcache = 0;
	int queue_invalidate = 0;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
	     inode->i_size);

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    !ci->i_wrbuffer_ref) {
		if (try_nonblocking_invalidate(inode) == 0) {
			revoked_rdcache = 1;
		} else {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
	}

	/* side effects now are allowed */

	/* treat implemented and dirty bits as issued for the purpose of
	 * deciding which inode fields the MDS is authoritative for */
	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	cap->cap_gen = session->s_cap_gen;

	__check_cap_issue(ci, cap, newcaps);

	/* only accept mode/owner from the MDS if we don't hold AUTH_EXCL
	 * (otherwise our local values are newer) */
	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = le32_to_cpu(grant->uid);
		inode->i_gid = le32_to_cpu(grant->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(grant->nlink);

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		/* only swap in the xattr blob if it is newer than ours */
		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
		}
	}

	/* size/ctime/mtime/atime? */
	ceph_fill_file_size(inode, issued,
			    le32_to_cpu(grant->truncate_seq),
			    le64_to_cpu(grant->truncate_size), size);
	ceph_decode_timespec(&mtime, &grant->mtime);
	ceph_decode_timespec(&atime, &grant->atime);
	ceph_decode_timespec(&ctime, &grant->ctime);
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
			    &atime);

	/* max size increase? */
	if (max_size != ci->i_max_size) {
		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
		ci->i_max_size = max_size;
		if (max_size >= ci->i_wanted_max_size) {
			ci->i_wanted_max_size = 0;  /* reset */
			ci->i_requested_max_size = 0;
		}
		wake = 1;	/* a writer may be blocked on max_size */
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		/* the MDS's notion of wanted is stale; patch the message
		 * in place so later processing sees our current wanted */
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		grant->wanted = cpu_to_le32(wanted);
	}

	cap->seq = seq;

	/* file layout may have changed */
	ci->i_layout = grant->layout;

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
			writeback = 1; /* will delay ack */
		else if (dirty & ~newcaps)
			reply = 1;     /* initiate writeback in check_caps */
		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
			   revoked_rdcache)
			reply = 2;     /* send revoke ack in check_caps */
		cap->issued = newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		cap->issued = newcaps;
		cap->implemented |= newcaps;	    /* add bits only, to
						     * avoid stepping on a
						     * pending revocation */
		wake = 1;
	}

	/* drop i_lock (per our __releases annotation) before doing any
	 * work that may block */
	spin_unlock(&inode->i_lock);
	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (wake)
		wake_up(&ci->i_cap_wq);
	return reply;
}
2364
2365/*
2366 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2367 * MDS has been safely committed.
2368 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;	/* cap bits this ack actually cleans */
	int drop = 0;		/* iput() after releasing i_lock */
	int i;

	/* only count a bit as cleaned if the ack's tid matches the tid
	 * we recorded when we flushed that bit (stale acks are ignored) */
	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((dirty & (1 << i)) &&
		    flush_tid == ci->i_cap_flush_tid[i])
			cleaned |= 1 << i;

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	/* nothing newly cleaned?  nothing to do. */
	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	/* cap_dirty_lock nests inside i_lock here */
	spin_lock(&mdsc->cap_dirty_lock);
	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		wake_up(&mdsc->cap_flushing_wq);
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;	/* drop ref held while dirty/flushing */
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up(&ci->i_cap_wq);

out:
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
2428
2429/*
2430 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
2431 * throw away our cap_snap.
2432 *
2433 * Caller hold s_mutex.
2434 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;	/* iput() after releasing i_lock */

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&inode->i_lock);
	/* find the cap_snap this ack refers to by its 'follows' value */
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			/* ignore acks whose tid doesn't match the flush we
			 * actually sent for this cap_snap */
			if (capsnap->flush_tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			/* flushed cap_snaps should have no dirty pages and
			 * no writers left */
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing cap_snap %p follows %lld\n",
			     capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
2474
2475/*
2476 * Handle TRUNC from MDS, indicating file truncation.
2477 *
2478 * caller hold s_mutex.
2479 */
2480static void handle_cap_trunc(struct inode *inode,
2481 struct ceph_mds_caps *trunc,
2482 struct ceph_mds_session *session)
2483 __releases(inode->i_lock)
2484{
2485 struct ceph_inode_info *ci = ceph_inode(inode);
2486 int mds = session->s_mds;
2487 int seq = le32_to_cpu(trunc->seq);
2488 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2489 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2490 u64 size = le64_to_cpu(trunc->size);
2491 int implemented = 0;
2492 int dirty = __ceph_caps_dirty(ci);
2493 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2494 int queue_trunc = 0;
2495
2496 issued |= implemented | dirty;
2497
2498 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2499 inode, mds, seq, truncate_size, truncate_seq);
2500 queue_trunc = ceph_fill_file_size(inode, issued,
2501 truncate_seq, truncate_size, size);
2502 spin_unlock(&inode->i_lock);
2503
2504 if (queue_trunc)
2505 ceph_queue_vmtruncate(inode);
2506}
2507
2508/*
2509 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
2510 * different one. If we are the most recent migration we've seen (as
2511 * indicated by mseq), make note of the migrating cap bits for the
2512 * duration (until we see the corresponding IMPORT).
2513 *
2514 * caller holds s_mutex
2515 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	struct ceph_cap *cap = NULL, *t;
	struct rb_node *p;
	int remember = 1;	/* record exporting state unless stale */

	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
	     inode, ci, mds, mseq);

	spin_lock(&inode->i_lock);

	/* make sure we haven't seen a higher mseq */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		t = rb_entry(p, struct ceph_cap, ci_node);
		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
			dout(" higher mseq on cap from mds%d\n",
			     t->session->s_mds);
			remember = 0;
		}
		/* also locate the cap for the exporting mds while we scan */
		if (t->session->s_mds == mds)
			cap = t;
	}

	if (cap) {
		if (remember) {
			/* make note of the migrating bits so a later IMPORT
			 * can match them up */
			ci->i_cap_exporting_mds = mds;
			ci->i_cap_exporting_mseq = mseq;
			ci->i_cap_exporting_issued = cap->issued;
		}
		__ceph_remove_cap(cap);
	} else {
		/* EXPORT for a cap we don't hold; unexpected */
		WARN_ON(!cap);
	}

	spin_unlock(&inode->i_lock);
}
2557
2558/*
2559 * Handle cap IMPORT. If there are temp bits from an older EXPORT,
2560 * clean them up.
2561 *
2562 * caller holds s_mutex.
2563 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_session *session,
			      void *snaptrace, int snaptrace_len)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned issued = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);

	/* if this import corresponds to (is newer than) a pending EXPORT
	 * we noted earlier, clear the exporting state */
	if (ci->i_cap_exporting_mds >= 0 &&
	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
		     " - cleared exporting from mds%d\n",
		     inode, ci, mds, mseq,
		     ci->i_cap_exporting_mds);
		ci->i_cap_exporting_issued = 0;
		ci->i_cap_exporting_mseq = 0;
		ci->i_cap_exporting_mds = -1;
	} else {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
		     inode, ci, mds, mseq);
	}

	/* update the snap trace under the write lock, then keep snap_rwsem
	 * for read while we install the cap and flush */
	down_write(&mdsc->snap_rwsem);
	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
			       false);
	downgrade_write(&mdsc->snap_rwsem);
	ceph_add_cap(inode, session, cap_id, -1,
		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
		     NULL /* no caps context */);
	try_flush_caps(inode, session, NULL);
	up_read(&mdsc->snap_rwsem);
}
2602
2603/*
2604 * Handle a caps message from the MDS.
2605 *
2606 * Identify the appropriate session, inode, and call the right handler
2607 * based on the cap op.
2608 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	int mds = session->s_mds;
	int op;
	u32 seq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	int check_caps = 0;
	void *snaptrace;
	int r;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	snaptrace = h + 1;	/* snap trace follows the fixed-size header */
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);
	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);
		goto done;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, session);
		goto done;

	case CEPH_CAP_OP_IMPORT:
		handle_cap_import(mdsc, inode, h, session,
				  snaptrace, le32_to_cpu(h->snap_trace_len));
		check_caps = 1; /* we may have sent a RELEASE to the old auth */
		goto done;
	}

	/* the rest require a cap */
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&inode->i_lock);
		goto done;
	}

	/* note that each of these drops i_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		r = handle_cap_grant(inode, h, session, cap, msg->middle);
		/* grant handler's return code tells us what follow-up
		 * check_caps (if any) to do; see handle_cap_grant */
		if (r == 1)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
					session);
		else if (r == 2)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY,
					session);
		break;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&inode->i_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

done:
	mutex_unlock(&session->s_mutex);

	if (check_caps)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
	if (inode)
		iput(inode);	/* drop ref from ceph_find_inode */
	return;

bad:
	/* reached only before s_mutex is taken or inode looked up */
	pr_err("ceph_handle_caps: corrupt message\n");
	ceph_msg_dump(msg);
	return;
}
2728
2729/*
2730 * Delayed work handler to process end of delayed cap release LRU list.
2731 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	/* re-take cap_delay_lock each iteration: ceph_check_caps() must
	 * run without it held.  Both break paths leave the lock held for
	 * the final unlock below. */
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		/* stop at the first entry whose delay has not yet expired,
		 * unless it is explicitly flagged for flush */
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}
2755
2756/*
2757 * Flush all dirty caps to the mds
2758 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci, *nci = NULL;	/* current / next */
	struct inode *inode, *ninode = NULL;
	struct list_head *p, *n;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	list_for_each_safe(p, n, &mdsc->cap_dirty) {
		if (nci) {
			/* we already pinned this entry (as "next") on the
			 * previous iteration; clear its NOFLUSH marker */
			ci = nci;
			inode = ninode;
			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
			dout("flush_dirty_caps inode %p (was next inode)\n",
			     inode);
		} else {
			ci = list_entry(p, struct ceph_inode_info,
					i_dirty_item);
			inode = igrab(&ci->vfs_inode);
			BUG_ON(!inode);
			dout("flush_dirty_caps inode %p\n", inode);
		}
		/* pin the *next* entry too, and mark it NOFLUSH, before we
		 * drop cap_dirty_lock — this keeps our iteration cursor
		 * valid while ceph_check_caps runs unlocked */
		if (n != &mdsc->cap_dirty) {
			nci = list_entry(n, struct ceph_inode_info,
					 i_dirty_item);
			ninode = igrab(&nci->vfs_inode);
			BUG_ON(!ninode);
			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
			dout("flush_dirty_caps next inode %p, noflush\n",
			     ninode);
		} else {
			nci = NULL;
			ninode = NULL;
		}
		spin_unlock(&mdsc->cap_dirty_lock);
		if (inode) {
			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
					NULL);
			iput(inode);	/* drop the igrab ref above */
		}
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
}
2803
2804/*
2805 * Drop open file reference. If we were the last open file,
2806 * we may need to release capabilities to the MDS (or schedule
2807 * their delayed release).
2808 */
2809void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2810{
2811 struct inode *inode = &ci->vfs_inode;
2812 int last = 0;
2813
2814 spin_lock(&inode->i_lock);
2815 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2816 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2817 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2818 if (--ci->i_nr_by_mode[fmode] == 0)
2819 last++;
2820 spin_unlock(&inode->i_lock);
2821
2822 if (last && ci->i_vino.snap == CEPH_NOSNAP)
2823 ceph_check_caps(ci, 0, NULL);
2824}
2825
2826/*
2827 * Helpers for embedding cap and dentry lease releases into mds
2828 * requests.
2829 *
2830 * @force is used by dentry_release (below) to force inclusion of a
2831 * record for the directory inode, even when there aren't any caps to
2832 * drop.
2833 */
2834int ceph_encode_inode_release(void **p, struct inode *inode,
2835 int mds, int drop, int unless, int force)
2836{
2837 struct ceph_inode_info *ci = ceph_inode(inode);
2838 struct ceph_cap *cap;
2839 struct ceph_mds_request_release *rel = *p;
2840 int ret = 0;
2841
2842 dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
2843 mds, ceph_cap_string(drop), ceph_cap_string(unless));
2844
2845 spin_lock(&inode->i_lock);
2846 cap = __get_cap_for_mds(ci, mds);
2847 if (cap && __cap_is_valid(cap)) {
2848 if (force ||
2849 ((cap->issued & drop) &&
2850 (cap->issued & unless) == 0)) {
2851 if ((cap->issued & drop) &&
2852 (cap->issued & unless) == 0) {
2853 dout("encode_inode_release %p cap %p %s -> "
2854 "%s\n", inode, cap,
2855 ceph_cap_string(cap->issued),
2856 ceph_cap_string(cap->issued & ~drop));
2857 cap->issued &= ~drop;
2858 cap->implemented &= ~drop;
2859 if (ci->i_ceph_flags & CEPH_I_NODELAY) {
2860 int wanted = __ceph_caps_wanted(ci);
2861 dout(" wanted %s -> %s (act %s)\n",
2862 ceph_cap_string(cap->mds_wanted),
2863 ceph_cap_string(cap->mds_wanted &
2864 ~wanted),
2865 ceph_cap_string(wanted));
2866 cap->mds_wanted &= wanted;
2867 }
2868 } else {
2869 dout("encode_inode_release %p cap %p %s"
2870 " (force)\n", inode, cap,
2871 ceph_cap_string(cap->issued));
2872 }
2873
2874 rel->ino = cpu_to_le64(ceph_ino(inode));
2875 rel->cap_id = cpu_to_le64(cap->cap_id);
2876 rel->seq = cpu_to_le32(cap->seq);
2877 rel->issue_seq = cpu_to_le32(cap->issue_seq),
2878 rel->mseq = cpu_to_le32(cap->mseq);
2879 rel->caps = cpu_to_le32(cap->issued);
2880 rel->wanted = cpu_to_le32(cap->mds_wanted);
2881 rel->dname_len = 0;
2882 rel->dname_seq = 0;
2883 *p += sizeof(*rel);
2884 ret = 1;
2885 } else {
2886 dout("encode_inode_release %p cap %p %s\n",
2887 inode, cap, ceph_cap_string(cap->issued));
2888 }
2889 }
2890 spin_unlock(&inode->i_lock);
2891 return ret;
2892}
2893
/*
 * Encode a dentry lease release record (an inode release record for
 * the parent directory, plus the dentry name and lease seq) into *p.
 * Returns 1 if a record was written, 0 otherwise.
 */
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       int mds, int drop, int unless)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force an record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * release.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	/* re-check the lease under d_lock; append the dentry name and
	 * lease seq only if a release record was actually written */
	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}
diff --git a/fs/ceph/ceph_debug.h b/fs/ceph/ceph_debug.h
new file mode 100644
index 000000000000..1818c2305610
--- /dev/null
+++ b/fs/ceph/ceph_debug.h
@@ -0,0 +1,37 @@
#ifndef _FS_CEPH_DEBUG_H
#define _FS_CEPH_DEBUG_H

/* prefix all pr_* output from this module with the module name */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_CEPH_FS_PRETTYDEBUG

/*
 * wrap pr_debug to include a filename:lineno prefix on each line.
 * this incurs some overhead (kernel size and execution time) due to
 * the extra function call at each call site.
 */

# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
extern const char *ceph_file_part(const char *s, int len);
# define dout(fmt, ...)						\
	pr_debug(" %12.12s:%-4d : " fmt,			\
		 ceph_file_part(__FILE__, sizeof(__FILE__)),	\
		 __LINE__, ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...)	do {				\
		if (0)					\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
	} while (0)
# endif

#else

/*
 * or, just wrap pr_debug
 */
# define dout(fmt, ...)	pr_debug(" " fmt, ##__VA_ARGS__)

#endif

#endif
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c
new file mode 100644
index 000000000000..ab6cf35c4091
--- /dev/null
+++ b/fs/ceph/ceph_frag.c
@@ -0,0 +1,21 @@
1/*
2 * Ceph 'frag' type
3 */
4#include "types.h"
5
6int ceph_frag_compare(__u32 a, __u32 b)
7{
8 unsigned va = ceph_frag_value(a);
9 unsigned vb = ceph_frag_value(b);
10 if (va < vb)
11 return -1;
12 if (va > vb)
13 return 1;
14 va = ceph_frag_bits(a);
15 vb = ceph_frag_bits(b);
16 if (va < vb)
17 return -1;
18 if (va > vb)
19 return 1;
20 return 0;
21}
diff --git a/fs/ceph/ceph_frag.h b/fs/ceph/ceph_frag.h
new file mode 100644
index 000000000000..793f50cb7c22
--- /dev/null
+++ b/fs/ceph/ceph_frag.h
@@ -0,0 +1,109 @@
1#ifndef _FS_CEPH_FRAG_H
2#define _FS_CEPH_FRAG_H
3
4/*
5 * "Frags" are a way to describe a subset of a 32-bit number space,
6 * using a mask and a value to match against that mask. Any given frag
7 * (subset of the number space) can be partitioned into 2^n sub-frags.
8 *
9 * Frags are encoded into a 32-bit word:
10 * 8 upper bits = "bits"
11 * 24 lower bits = "value"
12 * (We could go to 5+27 bits, but who cares.)
13 *
14 * We use the _most_ significant bits of the 24 bit value. This makes
15 * values logically sort.
16 *
17 * Unfortunately, because the "bits" field is still in the high bits, we
18 * can't sort encoded frags numerically. However, it does allow you
19 * to feed encoded frags as values into frag_contains_value.
20 */
/* build a frag from a bit count and a (masked) value */
static inline __u32 ceph_frag_make(__u32 b, __u32 v)
{
	return (b << 24) |
		(v & (0xffffffu << (24-b)) & 0xffffffu);
}
/* number of significant bits (upper 8 bits of the encoding) */
static inline __u32 ceph_frag_bits(__u32 f)
{
	return f >> 24;
}
/* the 24-bit value portion */
static inline __u32 ceph_frag_value(__u32 f)
{
	return f & 0xffffffu;
}
/* mask selecting the significant (high) bits of the value */
static inline __u32 ceph_frag_mask(__u32 f)
{
	return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
}
static inline __u32 ceph_frag_mask_shift(__u32 f)
{
	return 24 - ceph_frag_bits(f);
}

/* does frag @f contain the value @v? */
static inline int ceph_frag_contains_value(__u32 f, __u32 v)
{
	return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
}
static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
{
	/* is sub as specific as us, and contained by us? */
	return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
	       (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
}

/* parent: one fewer significant bit, value masked accordingly */
static inline __u32 ceph_frag_parent(__u32 f)
{
	return ceph_frag_make(ceph_frag_bits(f) - 1,
			 ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
}
/* a left child has a 0 in its lowest significant value bit */
static inline int ceph_frag_is_left_child(__u32 f)
{
	return ceph_frag_bits(f) > 0 &&
		(ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
}
static inline int ceph_frag_is_right_child(__u32 f)
{
	return ceph_frag_bits(f) > 0 &&
		(ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
}
/* sibling: flip the lowest significant value bit */
static inline __u32 ceph_frag_sibling(__u32 f)
{
	return ceph_frag_make(ceph_frag_bits(f),
		      ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
}
/* children: one more significant bit; right child sets the new bit */
static inline __u32 ceph_frag_left_child(__u32 f)
{
	return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
}
static inline __u32 ceph_frag_right_child(__u32 f)
{
	return ceph_frag_make(ceph_frag_bits(f)+1,
	      ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
}
/* descend @by levels, taking the @i'th child at the new depth */
static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
{
	int newbits = ceph_frag_bits(f) + by;
	return ceph_frag_make(newbits,
			 ceph_frag_value(f) | (i << (24 - newbits)));
}
/* leftmost/rightmost frags at a given depth bound the value range */
static inline int ceph_frag_is_leftmost(__u32 f)
{
	return ceph_frag_value(f) == 0;
}
static inline int ceph_frag_is_rightmost(__u32 f)
{
	return ceph_frag_value(f) == ceph_frag_mask(f);
}
/* next frag at the same depth, in ascending value order */
static inline __u32 ceph_frag_next(__u32 f)
{
	return ceph_frag_make(ceph_frag_bits(f),
			 ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
}
102
103/*
104 * comparator to sort frags logically, as when traversing the
105 * number space in ascending order...
106 */
107int ceph_frag_compare(__u32 a, __u32 b);
108
109#endif
diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c
new file mode 100644
index 000000000000..79d76bc4303f
--- /dev/null
+++ b/fs/ceph/ceph_fs.c
@@ -0,0 +1,74 @@
1/*
2 * Some non-inline ceph helpers
3 */
4#include "types.h"
5
6/*
7 * return true if @layout appears to be valid
8 */
9int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
10{
11 __u32 su = le32_to_cpu(layout->fl_stripe_unit);
12 __u32 sc = le32_to_cpu(layout->fl_stripe_count);
13 __u32 os = le32_to_cpu(layout->fl_object_size);
14
15 /* stripe unit, object size must be non-zero, 64k increment */
16 if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
17 return 0;
18 if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
19 return 0;
20 /* object size must be a multiple of stripe unit */
21 if (os < su || os % su)
22 return 0;
23 /* stripe count must be non-zero */
24 if (!sc)
25 return 0;
26 return 1;
27}
28
29
30int ceph_flags_to_mode(int flags)
31{
32#ifdef O_DIRECTORY /* fixme */
33 if ((flags & O_DIRECTORY) == O_DIRECTORY)
34 return CEPH_FILE_MODE_PIN;
35#endif
36#ifdef O_LAZY
37 if (flags & O_LAZY)
38 return CEPH_FILE_MODE_LAZY;
39#endif
40 if ((flags & O_APPEND) == O_APPEND)
41 flags |= O_WRONLY;
42
43 flags &= O_ACCMODE;
44 if ((flags & O_RDWR) == O_RDWR)
45 return CEPH_FILE_MODE_RDWR;
46 if ((flags & O_WRONLY) == O_WRONLY)
47 return CEPH_FILE_MODE_WR;
48 return CEPH_FILE_MODE_RD;
49}
50
/*
 * Return the set of capability bits appropriate for a given file mode.
 * Unknown modes get no caps (0).
 */
int ceph_caps_for_mode(int mode)
{
	switch (mode) {
	case CEPH_FILE_MODE_PIN:
		return CEPH_CAP_PIN;
	case CEPH_FILE_MODE_RD:
		/* read: shared file state plus read/cache caps */
		return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
			CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
	case CEPH_FILE_MODE_RDWR:
		/* read/write: everything RD has, plus write/buffer and
		 * exclusive auth/xattr caps */
		return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
			CEPH_CAP_FILE_EXCL |
			CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE |
			CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
			CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
			CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
	case CEPH_FILE_MODE_WR:
		/* write-only: like RDWR but without the read/cache caps */
		return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
			CEPH_CAP_FILE_EXCL |
			CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
			CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
			CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
	}
	return 0;
}
diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h
new file mode 100644
index 000000000000..0c2241ef3653
--- /dev/null
+++ b/fs/ceph/ceph_fs.h
@@ -0,0 +1,650 @@
1/*
2 * ceph_fs.h - Ceph constants and data types to share between kernel and
3 * user space.
4 *
5 * Most types in this file are defined as little-endian, and are
6 * primarily intended to describe data structures that pass over the
7 * wire or that are stored on disk.
8 *
9 * LGPL2
10 */
11
12#ifndef _FS_CEPH_CEPH_FS_H
13#define _FS_CEPH_CEPH_FS_H
14
15#include "msgr.h"
16#include "rados.h"
17
18/*
19 * Ceph release version
20 */
21#define CEPH_VERSION_MAJOR 0
22#define CEPH_VERSION_MINOR 19
23#define CEPH_VERSION_PATCH 0
24
25#define _CEPH_STRINGIFY(x) #x
26#define CEPH_STRINGIFY(x) _CEPH_STRINGIFY(x)
27#define CEPH_MAKE_VERSION(x, y, z) CEPH_STRINGIFY(x) "." CEPH_STRINGIFY(y) \
28 "." CEPH_STRINGIFY(z)
29#define CEPH_VERSION CEPH_MAKE_VERSION(CEPH_VERSION_MAJOR, \
30 CEPH_VERSION_MINOR, CEPH_VERSION_PATCH)
31
32/*
33 * subprotocol versions. when specific messages types or high-level
34 * protocols change, bump the affected components. we keep rev
35 * internal cluster protocols separately from the public,
36 * client-facing protocol.
37 */
38#define CEPH_OSD_PROTOCOL 8 /* cluster internal */
39#define CEPH_MDS_PROTOCOL 9 /* cluster internal */
40#define CEPH_MON_PROTOCOL 5 /* cluster internal */
41#define CEPH_OSDC_PROTOCOL 24 /* server/client */
42#define CEPH_MDSC_PROTOCOL 32 /* server/client */
43#define CEPH_MONC_PROTOCOL 15 /* server/client */
44
45
46#define CEPH_INO_ROOT 1
47#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
48
49/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
50#define CEPH_MAX_MON 31
51
52
53/*
54 * feature bits
55 */
56#define CEPH_FEATURE_SUPPORTED 0
57#define CEPH_FEATURE_REQUIRED 0
58
59
60/*
61 * ceph_file_layout - describe data layout for a file/inode
62 */
63struct ceph_file_layout {
64 /* file -> object mapping */
65 __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
66 of page size. */
67 __le32 fl_stripe_count; /* over this many objects */
68 __le32 fl_object_size; /* until objects are this big, then move to
69 new objects */
70 __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */
71
72 /* pg -> disk layout */
73 __le32 fl_object_stripe_unit; /* for per-object parity, if any */
74
75 /* object -> pg layout */
76 __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
77 __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
78} __attribute__ ((packed));
79
80#define CEPH_MIN_STRIPE_UNIT 65536
81
82int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
83
84
85/* crypto algorithms */
86#define CEPH_CRYPTO_NONE 0x0
87#define CEPH_CRYPTO_AES 0x1
88
89/* security/authentication protocols */
90#define CEPH_AUTH_UNKNOWN 0x0
91#define CEPH_AUTH_NONE 0x1
92#define CEPH_AUTH_CEPHX 0x2
93
94
95/*********************************************
96 * message layer
97 */
98
99/*
100 * message types
101 */
102
103/* misc */
104#define CEPH_MSG_SHUTDOWN 1
105#define CEPH_MSG_PING 2
106
107/* client <-> monitor */
108#define CEPH_MSG_MON_MAP 4
109#define CEPH_MSG_MON_GET_MAP 5
110#define CEPH_MSG_STATFS 13
111#define CEPH_MSG_STATFS_REPLY 14
112#define CEPH_MSG_MON_SUBSCRIBE 15
113#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
114#define CEPH_MSG_AUTH 17
115#define CEPH_MSG_AUTH_REPLY 18
116
117/* client <-> mds */
118#define CEPH_MSG_MDS_MAP 21
119
120#define CEPH_MSG_CLIENT_SESSION 22
121#define CEPH_MSG_CLIENT_RECONNECT 23
122
123#define CEPH_MSG_CLIENT_REQUEST 24
124#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
125#define CEPH_MSG_CLIENT_REPLY 26
126#define CEPH_MSG_CLIENT_CAPS 0x310
127#define CEPH_MSG_CLIENT_LEASE 0x311
128#define CEPH_MSG_CLIENT_SNAP 0x312
129#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
130
131/* osd */
132#define CEPH_MSG_OSD_MAP 41
133#define CEPH_MSG_OSD_OP 42
134#define CEPH_MSG_OSD_OPREPLY 43
135
136struct ceph_mon_request_header {
137 __le64 have_version;
138 __le16 session_mon;
139 __le64 session_mon_tid;
140} __attribute__ ((packed));
141
142struct ceph_mon_statfs {
143 struct ceph_mon_request_header monhdr;
144 struct ceph_fsid fsid;
145} __attribute__ ((packed));
146
147struct ceph_statfs {
148 __le64 kb, kb_used, kb_avail;
149 __le64 num_objects;
150} __attribute__ ((packed));
151
152struct ceph_mon_statfs_reply {
153 struct ceph_fsid fsid;
154 __le64 version;
155 struct ceph_statfs st;
156} __attribute__ ((packed));
157
158struct ceph_osd_getmap {
159 struct ceph_mon_request_header monhdr;
160 struct ceph_fsid fsid;
161 __le32 start;
162} __attribute__ ((packed));
163
164struct ceph_mds_getmap {
165 struct ceph_mon_request_header monhdr;
166 struct ceph_fsid fsid;
167} __attribute__ ((packed));
168
169struct ceph_client_mount {
170 struct ceph_mon_request_header monhdr;
171} __attribute__ ((packed));
172
173struct ceph_mon_subscribe_item {
174 __le64 have_version; __le64 have;
175 __u8 onetime;
176} __attribute__ ((packed));
177
178struct ceph_mon_subscribe_ack {
179 __le32 duration; /* seconds */
180 struct ceph_fsid fsid;
181} __attribute__ ((packed));
182
183/*
184 * mds states
185 * > 0 -> in
186 * <= 0 -> out
187 */
188#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */
189#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees.
190 empty log. */
191#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */
192#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */
193#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
194#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
195#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
196
197#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
198#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
199 operations (import, rename, etc.) */
200#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */
201#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */
202#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
203#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */
204#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */
205
206extern const char *ceph_mds_state_name(int s);
207
208
209/*
210 * metadata lock types.
211 * - these are bitmasks.. we can compose them
212 * - they also define the lock ordering by the MDS
213 * - a few of these are internal to the mds
214 */
215#define CEPH_LOCK_DN 1
216#define CEPH_LOCK_ISNAP 2
217#define CEPH_LOCK_IVERSION 4 /* mds internal */
218#define CEPH_LOCK_IFILE 8 /* mds internal */
219#define CEPH_LOCK_IAUTH 32
220#define CEPH_LOCK_ILINK 64
221#define CEPH_LOCK_IDFT 128 /* dir frag tree */
222#define CEPH_LOCK_INEST 256 /* mds internal */
223#define CEPH_LOCK_IXATTR 512
224#define CEPH_LOCK_INO 2048 /* immutable inode bits; not a lock */
225
226/* client_session ops */
227enum {
228 CEPH_SESSION_REQUEST_OPEN,
229 CEPH_SESSION_OPEN,
230 CEPH_SESSION_REQUEST_CLOSE,
231 CEPH_SESSION_CLOSE,
232 CEPH_SESSION_REQUEST_RENEWCAPS,
233 CEPH_SESSION_RENEWCAPS,
234 CEPH_SESSION_STALE,
235 CEPH_SESSION_RECALL_STATE,
236};
237
238extern const char *ceph_session_op_name(int op);
239
240struct ceph_mds_session_head {
241 __le32 op;
242 __le64 seq;
243 struct ceph_timespec stamp;
244 __le32 max_caps, max_leases;
245} __attribute__ ((packed));
246
247/* client_request */
248/*
249 * metadata ops.
250 * & 0x001000 -> write op
251 * & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
252 * & 0x100000 -> use weird ino/path trace
253 */
254#define CEPH_MDS_OP_WRITE 0x001000
255enum {
256 CEPH_MDS_OP_LOOKUP = 0x00100,
257 CEPH_MDS_OP_GETATTR = 0x00101,
258 CEPH_MDS_OP_LOOKUPHASH = 0x00102,
259 CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
260
261 CEPH_MDS_OP_SETXATTR = 0x01105,
262 CEPH_MDS_OP_RMXATTR = 0x01106,
263 CEPH_MDS_OP_SETLAYOUT = 0x01107,
264 CEPH_MDS_OP_SETATTR = 0x01108,
265
266 CEPH_MDS_OP_MKNOD = 0x01201,
267 CEPH_MDS_OP_LINK = 0x01202,
268 CEPH_MDS_OP_UNLINK = 0x01203,
269 CEPH_MDS_OP_RENAME = 0x01204,
270 CEPH_MDS_OP_MKDIR = 0x01220,
271 CEPH_MDS_OP_RMDIR = 0x01221,
272 CEPH_MDS_OP_SYMLINK = 0x01222,
273
274 CEPH_MDS_OP_CREATE = 0x01301,
275 CEPH_MDS_OP_OPEN = 0x00302,
276 CEPH_MDS_OP_READDIR = 0x00305,
277
278 CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
279 CEPH_MDS_OP_MKSNAP = 0x01400,
280 CEPH_MDS_OP_RMSNAP = 0x01401,
281 CEPH_MDS_OP_LSSNAP = 0x00402,
282};
283
284extern const char *ceph_mds_op_name(int op);
285
286
287#define CEPH_SETATTR_MODE 1
288#define CEPH_SETATTR_UID 2
289#define CEPH_SETATTR_GID 4
290#define CEPH_SETATTR_MTIME 8
291#define CEPH_SETATTR_ATIME 16
292#define CEPH_SETATTR_SIZE 32
293#define CEPH_SETATTR_CTIME 64
294
295union ceph_mds_request_args {
296 struct {
297 __le32 mask; /* CEPH_CAP_* */
298 } __attribute__ ((packed)) getattr;
299 struct {
300 __le32 mode;
301 __le32 uid;
302 __le32 gid;
303 struct ceph_timespec mtime;
304 struct ceph_timespec atime;
305 __le64 size, old_size; /* old_size needed by truncate */
306 __le32 mask; /* CEPH_SETATTR_* */
307 } __attribute__ ((packed)) setattr;
308 struct {
309 __le32 frag; /* which dir fragment */
310 __le32 max_entries; /* how many dentries to grab */
311 } __attribute__ ((packed)) readdir;
312 struct {
313 __le32 mode;
314 __le32 rdev;
315 } __attribute__ ((packed)) mknod;
316 struct {
317 __le32 mode;
318 } __attribute__ ((packed)) mkdir;
319 struct {
320 __le32 flags;
321 __le32 mode;
322 __le32 stripe_unit; /* layout for newly created file */
323 __le32 stripe_count; /* ... */
324 __le32 object_size;
325 __le32 file_replication;
326 __le32 preferred;
327 } __attribute__ ((packed)) open;
328 struct {
329 __le32 flags;
330 } __attribute__ ((packed)) setxattr;
331 struct {
332 struct ceph_file_layout layout;
333 } __attribute__ ((packed)) setlayout;
334} __attribute__ ((packed));
335
336#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
337#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
338
339struct ceph_mds_request_head {
340 __le64 oldest_client_tid;
341 __le32 mdsmap_epoch; /* on client */
342 __le32 flags; /* CEPH_MDS_FLAG_* */
343 __u8 num_retry, num_fwd; /* count retry, fwd attempts */
344 __le16 num_releases; /* # include cap/lease release records */
345 __le32 op; /* mds op code */
346 __le32 caller_uid, caller_gid;
347 __le64 ino; /* use this ino for openc, mkdir, mknod,
348 etc. (if replaying) */
349 union ceph_mds_request_args args;
350} __attribute__ ((packed));
351
352/* cap/lease release record */
353struct ceph_mds_request_release {
354 __le64 ino, cap_id; /* ino and unique cap id */
355 __le32 caps, wanted; /* new issued, wanted */
356 __le32 seq, issue_seq, mseq;
357 __le32 dname_seq; /* if releasing a dentry lease, a */
358 __le32 dname_len; /* string follows. */
359} __attribute__ ((packed));
360
361/* client reply */
362struct ceph_mds_reply_head {
363 __le32 op;
364 __le32 result;
365 __le32 mdsmap_epoch;
366 __u8 safe; /* true if committed to disk */
367 __u8 is_dentry, is_target; /* true if dentry, target inode records
368 are included with reply */
369} __attribute__ ((packed));
370
371/* one for each node split */
372struct ceph_frag_tree_split {
373 __le32 frag; /* this frag splits... */
374 __le32 by; /* ...by this many bits */
375} __attribute__ ((packed));
376
377struct ceph_frag_tree_head {
378 __le32 nsplits; /* num ceph_frag_tree_split records */
379 struct ceph_frag_tree_split splits[];
380} __attribute__ ((packed));
381
382/* capability issue, for bundling with mds reply */
383struct ceph_mds_reply_cap {
384 __le32 caps, wanted; /* caps issued, wanted */
385 __le64 cap_id;
386 __le32 seq, mseq;
387 __le64 realm; /* snap realm */
388 __u8 flags; /* CEPH_CAP_FLAG_* */
389} __attribute__ ((packed));
390
391#define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */
392
393/* inode record, for bundling with mds reply */
394struct ceph_mds_reply_inode {
395 __le64 ino;
396 __le64 snapid;
397 __le32 rdev;
398 __le64 version; /* inode version */
399 __le64 xattr_version; /* version for xattr blob */
400 struct ceph_mds_reply_cap cap; /* caps issued for this inode */
401 struct ceph_file_layout layout;
402 struct ceph_timespec ctime, mtime, atime;
403 __le32 time_warp_seq;
404 __le64 size, max_size, truncate_size;
405 __le32 truncate_seq;
406 __le32 mode, uid, gid;
407 __le32 nlink;
408 __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */
409 struct ceph_timespec rctime;
410 struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */
411} __attribute__ ((packed));
412/* followed by frag array, then symlink string, then xattr blob */
413
414/* reply_lease follows dname, and reply_inode */
415struct ceph_mds_reply_lease {
416 __le16 mask; /* lease type(s) */
417 __le32 duration_ms; /* lease duration */
418 __le32 seq;
419} __attribute__ ((packed));
420
421struct ceph_mds_reply_dirfrag {
422 __le32 frag; /* fragment */
423 __le32 auth; /* auth mds, if this is a delegation point */
424 __le32 ndist; /* number of mds' this is replicated on */
425 __le32 dist[];
426} __attribute__ ((packed));
427
428/* file access modes */
429#define CEPH_FILE_MODE_PIN 0
430#define CEPH_FILE_MODE_RD 1
431#define CEPH_FILE_MODE_WR 2
432#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
433#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
434#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */
435
436int ceph_flags_to_mode(int flags);
437
438
439/* capability bits */
440#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
441
442/* generic cap bits */
443#define CEPH_CAP_GSHARED 1 /* client can read */
444#define CEPH_CAP_GEXCL 2 /* client can read and update */
445#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */
446#define CEPH_CAP_GRD 8 /* (file) client can read */
447#define CEPH_CAP_GWR 16 /* (file) client can write */
448#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */
449#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
450#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
451
452/* per-lock shift */
453#define CEPH_CAP_SAUTH 2
454#define CEPH_CAP_SLINK 4
455#define CEPH_CAP_SXATTR 6
456#define CEPH_CAP_SFILE 8 /* goes at the end (uses >2 cap bits) */
457
458#define CEPH_CAP_BITS 16
459
460/* composed values */
461#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
462#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH)
463#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK)
464#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK)
465#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR)
466#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR)
467#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE)
468#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE)
469#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE)
470#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE)
471#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE)
472#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE)
473#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE)
474#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
475#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE)
476
477/* cap masks (for getattr) */
478#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN
479#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */
480#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN
481#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED
482#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED
483#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED
484#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED
485#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED
486#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED
487#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED
488#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */
489#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED
490#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \
491 CEPH_CAP_AUTH_SHARED | \
492 CEPH_CAP_LINK_SHARED | \
493 CEPH_CAP_FILE_SHARED | \
494 CEPH_CAP_XATTR_SHARED)
495
496#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
497 CEPH_CAP_LINK_SHARED | \
498 CEPH_CAP_XATTR_SHARED | \
499 CEPH_CAP_FILE_SHARED)
500#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \
501 CEPH_CAP_FILE_CACHE)
502
503#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \
504 CEPH_CAP_LINK_EXCL | \
505 CEPH_CAP_XATTR_EXCL | \
506 CEPH_CAP_FILE_EXCL)
507#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
508 CEPH_CAP_FILE_EXCL)
509#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
510#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
511 CEPH_CAP_ANY_FILE_WR | CEPH_CAP_PIN)
512
513#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
514 CEPH_LOCK_IXATTR)
515
516int ceph_caps_for_mode(int mode);
517
518enum {
519 CEPH_CAP_OP_GRANT, /* mds->client grant */
520 CEPH_CAP_OP_REVOKE, /* mds->client revoke */
521 CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */
522 CEPH_CAP_OP_EXPORT, /* mds has exported the cap */
523 CEPH_CAP_OP_IMPORT, /* mds has imported the cap */
524 CEPH_CAP_OP_UPDATE, /* client->mds update */
525 CEPH_CAP_OP_DROP, /* client->mds drop cap bits */
526 CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */
527 CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */
528 CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */
529 CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */
530 CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */
531 CEPH_CAP_OP_RENEW, /* client->mds renewal request */
532};
533
534extern const char *ceph_cap_op_name(int op);
535
536/*
537 * caps message, used for capability callbacks, acks, requests, etc.
538 */
539struct ceph_mds_caps {
540 __le32 op; /* CEPH_CAP_OP_* */
541 __le64 ino, realm;
542 __le64 cap_id;
543 __le32 seq, issue_seq;
544 __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
545 __le32 migrate_seq;
546 __le64 snap_follows;
547 __le32 snap_trace_len;
548
549 /* authlock */
550 __le32 uid, gid, mode;
551
552 /* linklock */
553 __le32 nlink;
554
555 /* xattrlock */
556 __le32 xattr_len;
557 __le64 xattr_version;
558
559 /* filelock */
560 __le64 size, max_size, truncate_size;
561 __le32 truncate_seq;
562 struct ceph_timespec mtime, atime, ctime;
563 struct ceph_file_layout layout;
564 __le32 time_warp_seq;
565} __attribute__ ((packed));
566
567/* cap release msg head */
568struct ceph_mds_cap_release {
569 __le32 num; /* number of cap_items that follow */
570} __attribute__ ((packed));
571
572struct ceph_mds_cap_item {
573 __le64 ino;
574 __le64 cap_id;
575 __le32 migrate_seq, seq;
576} __attribute__ ((packed));
577
578#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
579#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */
580#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */
581#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */
582
583extern const char *ceph_lease_op_name(int o);
584
585/* lease msg header */
586struct ceph_mds_lease {
587 __u8 action; /* CEPH_MDS_LEASE_* */
588 __le16 mask; /* which lease */
589 __le64 ino;
590 __le64 first, last; /* snap range */
591 __le32 seq;
592 __le32 duration_ms; /* duration of renewal */
593} __attribute__ ((packed));
594/* followed by a __le32+string for dname */
595
596/* client reconnect */
597struct ceph_mds_cap_reconnect {
598 __le64 cap_id;
599 __le32 wanted;
600 __le32 issued;
601 __le64 size;
602 struct ceph_timespec mtime, atime;
603 __le64 snaprealm;
604 __le64 pathbase; /* base ino for our path to this ino */
605} __attribute__ ((packed));
606/* followed by encoded string */
607
608struct ceph_mds_snaprealm_reconnect {
609 __le64 ino; /* snap realm base */
610 __le64 seq; /* snap seq for this snap realm */
611 __le64 parent; /* parent realm */
612} __attribute__ ((packed));
613
614/*
615 * snaps
616 */
617enum {
618 CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */
619 CEPH_SNAP_OP_CREATE,
620 CEPH_SNAP_OP_DESTROY,
621 CEPH_SNAP_OP_SPLIT,
622};
623
624extern const char *ceph_snap_op_name(int o);
625
626/* snap msg header */
627struct ceph_mds_snap_head {
628 __le32 op; /* CEPH_SNAP_OP_* */
629 __le64 split; /* ino to split off, if any */
630 __le32 num_split_inos; /* # inos belonging to new child realm */
631 __le32 num_split_realms; /* # child realms under new child realm */
632 __le32 trace_len; /* size of snap trace blob */
633} __attribute__ ((packed));
634/* followed by split ino list, then split realms, then the trace blob */
635
636/*
637 * encode info about a snaprealm, as viewed by a client
638 */
639struct ceph_mds_snap_realm {
640 __le64 ino; /* ino */
641 __le64 created; /* snap: when created */
642 __le64 parent; /* ino: parent realm */
643 __le64 parent_since; /* snap: same parent since */
644 __le64 seq; /* snap: version */
645 __le32 num_snaps;
646 __le32 num_prior_parent_snaps;
647} __attribute__ ((packed));
648/* followed by my snap list, then prior parent snap list */
649
650#endif
diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c
new file mode 100644
index 000000000000..bd570015d147
--- /dev/null
+++ b/fs/ceph/ceph_hash.c
@@ -0,0 +1,118 @@
1
2#include "types.h"
3
4/*
5 * Robert Jenkin's hash function.
6 * http://burtleburtle.net/bob/hash/evahash.html
7 * This is in the public domain.
8 */
9#define mix(a, b, c) \
10 do { \
11 a = a - b; a = a - c; a = a ^ (c >> 13); \
12 b = b - c; b = b - a; b = b ^ (a << 8); \
13 c = c - a; c = c - b; c = c ^ (b >> 13); \
14 a = a - b; a = a - c; a = a ^ (c >> 12); \
15 b = b - c; b = b - a; b = b ^ (a << 16); \
16 c = c - a; c = c - b; c = c ^ (b >> 5); \
17 a = a - b; a = a - c; a = a ^ (c >> 3); \
18 b = b - c; b = b - a; b = b ^ (a << 10); \
19 c = c - a; c = c - b; c = c ^ (b >> 15); \
20 } while (0)
21
/*
 * Compute Robert Jenkins' 32-bit hash of the @length-byte key @str.
 * Bytes are consumed little-endian into three state words, mixed in
 * 12-byte chunks, then the tail and the total length are folded in
 * before a final mix.  Deterministic for a given (str, length).
 */
22unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
23{
24 const unsigned char *k = (const unsigned char *)str;
25 __u32 a, b, c; /* the internal state */
26 __u32 len; /* how many key bytes still need mixing */
27
28 /* Set up the internal state */
29 len = length;
30 a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
31 b = a;
32 c = 0; /* variable initialization of internal state */
33
34 /* handle most of the key: 12 bytes per round, packed LE into a/b/c */
35 while (len >= 12) {
36 a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) +
37 ((__u32)k[3] << 24));
38 b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) +
39 ((__u32)k[7] << 24));
40 c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) +
41 ((__u32)k[11] << 24));
42 mix(a, b, c);
43 k = k + 12;
44 len = len - 12;
45 }
46
47 /* handle the last 11 bytes */
48 c = c + length;
49 switch (len) { /* all the case statements fall through */
50 case 11:
51 c = c + ((__u32)k[10] << 24);
52 case 10:
53 c = c + ((__u32)k[9] << 16);
54 case 9:
55 c = c + ((__u32)k[8] << 8);
56 /* the first byte of c is reserved for the length */
57 case 8:
58 b = b + ((__u32)k[7] << 24);
59 case 7:
60 b = b + ((__u32)k[6] << 16);
61 case 6:
62 b = b + ((__u32)k[5] << 8);
63 case 5:
64 b = b + k[4];
65 case 4:
66 a = a + ((__u32)k[3] << 24);
67 case 3:
68 a = a + ((__u32)k[2] << 16);
69 case 2:
70 a = a + ((__u32)k[1] << 8);
71 case 1:
72 a = a + k[0];
73 /* case 0: nothing left to add */
74 }
75 mix(a, b, c);
76
77 return c;
78}
79
80/*
81 * linux dcache hash
82 */
/*
 * Hash a string with the classic linux dcache hash:
 * hash = (hash + (c << 4) + (c >> 4)) * 11 per byte.
 * The unsigned long accumulator is truncated to unsigned on return.
 */
unsigned ceph_str_hash_linux(const char *str, unsigned length)
{
	unsigned long hash = 0;

	while (length > 0) {
		unsigned char ch = *str++;

		hash = (hash + (ch << 4) + (ch >> 4)) * 11;
		length--;
	}
	return hash;
}
94
95
96unsigned ceph_str_hash(int type, const char *s, unsigned len)
97{
98 switch (type) {
99 case CEPH_STR_HASH_LINUX:
100 return ceph_str_hash_linux(s, len);
101 case CEPH_STR_HASH_RJENKINS:
102 return ceph_str_hash_rjenkins(s, len);
103 default:
104 return -1;
105 }
106}
107
108const char *ceph_str_hash_name(int type)
109{
110 switch (type) {
111 case CEPH_STR_HASH_LINUX:
112 return "linux";
113 case CEPH_STR_HASH_RJENKINS:
114 return "rjenkins";
115 default:
116 return "unknown";
117 }
118}
diff --git a/fs/ceph/ceph_hash.h b/fs/ceph/ceph_hash.h
new file mode 100644
index 000000000000..5ac470c433c9
--- /dev/null
+++ b/fs/ceph/ceph_hash.h
@@ -0,0 +1,13 @@
1#ifndef _FS_CEPH_HASH_H
2#define _FS_CEPH_HASH_H
3
4#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
5#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */
6
7extern unsigned ceph_str_hash_linux(const char *s, unsigned len);
8extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len);
9
10extern unsigned ceph_str_hash(int type, const char *s, unsigned len);
11extern const char *ceph_str_hash_name(int type);
12
13#endif
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c
new file mode 100644
index 000000000000..8e4be6a80c62
--- /dev/null
+++ b/fs/ceph/ceph_strings.c
@@ -0,0 +1,176 @@
1/*
2 * Ceph string constants
3 */
4#include "types.h"
5
6const char *ceph_entity_type_name(int type)
7{
8 switch (type) {
9 case CEPH_ENTITY_TYPE_MDS: return "mds";
10 case CEPH_ENTITY_TYPE_OSD: return "osd";
11 case CEPH_ENTITY_TYPE_MON: return "mon";
12 case CEPH_ENTITY_TYPE_CLIENT: return "client";
13 case CEPH_ENTITY_TYPE_ADMIN: return "admin";
14 case CEPH_ENTITY_TYPE_AUTH: return "auth";
15 default: return "unknown";
16 }
17}
18
19const char *ceph_osd_op_name(int op)
20{
21 switch (op) {
22 case CEPH_OSD_OP_READ: return "read";
23 case CEPH_OSD_OP_STAT: return "stat";
24
25 case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
26
27 case CEPH_OSD_OP_WRITE: return "write";
28 case CEPH_OSD_OP_DELETE: return "delete";
29 case CEPH_OSD_OP_TRUNCATE: return "truncate";
30 case CEPH_OSD_OP_ZERO: return "zero";
31 case CEPH_OSD_OP_WRITEFULL: return "writefull";
32
33 case CEPH_OSD_OP_APPEND: return "append";
34 case CEPH_OSD_OP_STARTSYNC: return "startsync";
35 case CEPH_OSD_OP_SETTRUNC: return "settrunc";
36 case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
37
38 case CEPH_OSD_OP_TMAPUP: return "tmapup";
39 case CEPH_OSD_OP_TMAPGET: return "tmapget";
40 case CEPH_OSD_OP_TMAPPUT: return "tmapput";
41
42 case CEPH_OSD_OP_GETXATTR: return "getxattr";
43 case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
44 case CEPH_OSD_OP_SETXATTR: return "setxattr";
45 case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
46 case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
47 case CEPH_OSD_OP_RMXATTR: return "rmxattr";
48
49 case CEPH_OSD_OP_PULL: return "pull";
50 case CEPH_OSD_OP_PUSH: return "push";
51 case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
52 case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
53 case CEPH_OSD_OP_SCRUB: return "scrub";
54
55 case CEPH_OSD_OP_WRLOCK: return "wrlock";
56 case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
57 case CEPH_OSD_OP_RDLOCK: return "rdlock";
58 case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
59 case CEPH_OSD_OP_UPLOCK: return "uplock";
60 case CEPH_OSD_OP_DNLOCK: return "dnlock";
61
62 case CEPH_OSD_OP_CALL: return "call";
63
64 case CEPH_OSD_OP_PGLS: return "pgls";
65 }
66 return "???";
67}
68
69const char *ceph_mds_state_name(int s)
70{
71 switch (s) {
72 /* down and out */
73 case CEPH_MDS_STATE_DNE: return "down:dne";
74 case CEPH_MDS_STATE_STOPPED: return "down:stopped";
75 /* up and out */
76 case CEPH_MDS_STATE_BOOT: return "up:boot";
77 case CEPH_MDS_STATE_STANDBY: return "up:standby";
78 case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
79 case CEPH_MDS_STATE_CREATING: return "up:creating";
80 case CEPH_MDS_STATE_STARTING: return "up:starting";
81 /* up and in */
82 case CEPH_MDS_STATE_REPLAY: return "up:replay";
83 case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
84 case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
85 case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
86 case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
87 case CEPH_MDS_STATE_ACTIVE: return "up:active";
88 case CEPH_MDS_STATE_STOPPING: return "up:stopping";
89 }
90 return "???";
91}
92
93const char *ceph_session_op_name(int op)
94{
95 switch (op) {
96 case CEPH_SESSION_REQUEST_OPEN: return "request_open";
97 case CEPH_SESSION_OPEN: return "open";
98 case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
99 case CEPH_SESSION_CLOSE: return "close";
100 case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
101 case CEPH_SESSION_RENEWCAPS: return "renewcaps";
102 case CEPH_SESSION_STALE: return "stale";
103 case CEPH_SESSION_RECALL_STATE: return "recall_state";
104 }
105 return "???";
106}
107
108const char *ceph_mds_op_name(int op)
109{
110 switch (op) {
111 case CEPH_MDS_OP_LOOKUP: return "lookup";
112 case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
113 case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
114 case CEPH_MDS_OP_GETATTR: return "getattr";
115 case CEPH_MDS_OP_SETXATTR: return "setxattr";
116 case CEPH_MDS_OP_SETATTR: return "setattr";
117 case CEPH_MDS_OP_RMXATTR: return "rmxattr";
118 case CEPH_MDS_OP_READDIR: return "readdir";
119 case CEPH_MDS_OP_MKNOD: return "mknod";
120 case CEPH_MDS_OP_LINK: return "link";
121 case CEPH_MDS_OP_UNLINK: return "unlink";
122 case CEPH_MDS_OP_RENAME: return "rename";
123 case CEPH_MDS_OP_MKDIR: return "mkdir";
124 case CEPH_MDS_OP_RMDIR: return "rmdir";
125 case CEPH_MDS_OP_SYMLINK: return "symlink";
126 case CEPH_MDS_OP_CREATE: return "create";
127 case CEPH_MDS_OP_OPEN: return "open";
128 case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
129 case CEPH_MDS_OP_LSSNAP: return "lssnap";
130 case CEPH_MDS_OP_MKSNAP: return "mksnap";
131 case CEPH_MDS_OP_RMSNAP: return "rmsnap";
132 }
133 return "???";
134}
135
136const char *ceph_cap_op_name(int op)
137{
138 switch (op) {
139 case CEPH_CAP_OP_GRANT: return "grant";
140 case CEPH_CAP_OP_REVOKE: return "revoke";
141 case CEPH_CAP_OP_TRUNC: return "trunc";
142 case CEPH_CAP_OP_EXPORT: return "export";
143 case CEPH_CAP_OP_IMPORT: return "import";
144 case CEPH_CAP_OP_UPDATE: return "update";
145 case CEPH_CAP_OP_DROP: return "drop";
146 case CEPH_CAP_OP_FLUSH: return "flush";
147 case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
148 case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
149 case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
150 case CEPH_CAP_OP_RELEASE: return "release";
151 case CEPH_CAP_OP_RENEW: return "renew";
152 }
153 return "???";
154}
155
156const char *ceph_lease_op_name(int o)
157{
158 switch (o) {
159 case CEPH_MDS_LEASE_REVOKE: return "revoke";
160 case CEPH_MDS_LEASE_RELEASE: return "release";
161 case CEPH_MDS_LEASE_RENEW: return "renew";
162 case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
163 }
164 return "???";
165}
166
167const char *ceph_snap_op_name(int o)
168{
169 switch (o) {
170 case CEPH_SNAP_OP_UPDATE: return "update";
171 case CEPH_SNAP_OP_CREATE: return "create";
172 case CEPH_SNAP_OP_DESTROY: return "destroy";
173 case CEPH_SNAP_OP_SPLIT: return "split";
174 }
175 return "???";
176}
diff --git a/fs/ceph/crush/crush.c b/fs/ceph/crush/crush.c
new file mode 100644
index 000000000000..fabd302e5779
--- /dev/null
+++ b/fs/ceph/crush/crush.c
@@ -0,0 +1,151 @@
1
2#ifdef __KERNEL__
3# include <linux/slab.h>
4#else
5# include <stdlib.h>
6# include <assert.h>
7# define kfree(x) do { if (x) free(x); } while (0)
8# define BUG_ON(x) assert(!(x))
9#endif
10
11#include "crush.h"
12
13const char *crush_bucket_alg_name(int alg)
14{
15 switch (alg) {
16 case CRUSH_BUCKET_UNIFORM: return "uniform";
17 case CRUSH_BUCKET_LIST: return "list";
18 case CRUSH_BUCKET_TREE: return "tree";
19 case CRUSH_BUCKET_STRAW: return "straw";
20 default: return "unknown";
21 }
22}
23
24/**
25 * crush_get_bucket_item_weight - Get weight of an item in given bucket
26 * @b: bucket pointer
27 * @p: item index in bucket
28 */
29int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
30{
31 if (p >= b->size)
32 return 0;
33
34 switch (b->alg) {
35 case CRUSH_BUCKET_UNIFORM:
36 return ((struct crush_bucket_uniform *)b)->item_weight;
37 case CRUSH_BUCKET_LIST:
38 return ((struct crush_bucket_list *)b)->item_weights[p];
39 case CRUSH_BUCKET_TREE:
40 if (p & 1)
41 return ((struct crush_bucket_tree *)b)->node_weights[p];
42 return 0;
43 case CRUSH_BUCKET_STRAW:
44 return ((struct crush_bucket_straw *)b)->item_weights[p];
45 }
46 return 0;
47}
48
/**
 * crush_calc_parents - Calculate parent vectors for the given crush map.
 * @map: crush_map pointer
 *
 * Fills map->device_parents (indexed by device id) and
 * map->bucket_parents (indexed by -1-bucket_id) with the id of the
 * bucket containing each item.  If an item appears in more than one
 * bucket, the last bucket scanned wins.
 */
void crush_calc_parents(struct crush_map *map)
{
	int i, b, c;

	for (b = 0; b < map->max_buckets; b++) {
		if (map->buckets[b] == NULL)
			continue;	/* bucket array may be sparse */
		for (i = 0; i < map->buckets[b]->size; i++) {
			c = map->buckets[b]->items[i];
			/* item ids must be valid devices or buckets */
			BUG_ON(c >= map->max_devices ||
			       c < -map->max_buckets);
			if (c >= 0)
				map->device_parents[c] = map->buckets[b]->id;
			else
				/* negative ids are buckets: index by -1-id */
				map->bucket_parents[-1-c] = map->buckets[b]->id;
		}
	}
}
71
72void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
73{
74 kfree(b->h.perm);
75 kfree(b->h.items);
76 kfree(b);
77}
78
79void crush_destroy_bucket_list(struct crush_bucket_list *b)
80{
81 kfree(b->item_weights);
82 kfree(b->sum_weights);
83 kfree(b->h.perm);
84 kfree(b->h.items);
85 kfree(b);
86}
87
88void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
89{
90 kfree(b->node_weights);
91 kfree(b);
92}
93
94void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
95{
96 kfree(b->straws);
97 kfree(b->item_weights);
98 kfree(b->h.perm);
99 kfree(b->h.items);
100 kfree(b);
101}
102
103void crush_destroy_bucket(struct crush_bucket *b)
104{
105 switch (b->alg) {
106 case CRUSH_BUCKET_UNIFORM:
107 crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b);
108 break;
109 case CRUSH_BUCKET_LIST:
110 crush_destroy_bucket_list((struct crush_bucket_list *)b);
111 break;
112 case CRUSH_BUCKET_TREE:
113 crush_destroy_bucket_tree((struct crush_bucket_tree *)b);
114 break;
115 case CRUSH_BUCKET_STRAW:
116 crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
117 break;
118 }
119}
120
121/**
122 * crush_destroy - Destroy a crush_map
123 * @map: crush_map pointer
124 */
125void crush_destroy(struct crush_map *map)
126{
127 int b;
128
129 /* buckets */
130 if (map->buckets) {
131 for (b = 0; b < map->max_buckets; b++) {
132 if (map->buckets[b] == NULL)
133 continue;
134 crush_destroy_bucket(map->buckets[b]);
135 }
136 kfree(map->buckets);
137 }
138
139 /* rules */
140 if (map->rules) {
141 for (b = 0; b < map->max_rules; b++)
142 kfree(map->rules[b]);
143 kfree(map->rules);
144 }
145
146 kfree(map->bucket_parents);
147 kfree(map->device_parents);
148 kfree(map);
149}
150
151
diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h
new file mode 100644
index 000000000000..dcd7e7523700
--- /dev/null
+++ b/fs/ceph/crush/crush.h
@@ -0,0 +1,180 @@
#ifndef _CRUSH_CRUSH_H
#define _CRUSH_CRUSH_H

#include <linux/types.h>

/*
 * CRUSH is a pseudo-random data distribution algorithm that
 * efficiently distributes input values (typically, data objects)
 * across a heterogeneous, structured storage cluster.
 *
 * The algorithm was originally described in detail in this paper
 * (although the algorithm has evolved somewhat since then):
 *
 *     http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
 *
 * LGPL2
 */


#define CRUSH_MAGIC 0x00010000ul   /* for detecting algorithm revisions */


#define CRUSH_MAX_DEPTH 10  /* max crush hierarchy depth */
#define CRUSH_MAX_SET   10  /* max size of a mapping result */


/*
 * CRUSH uses user-defined "rules" to describe how inputs should be
 * mapped to devices.  A rule consists of a sequence of steps to
 * perform to generate the set of output devices.
 */
struct crush_rule_step {
	__u32 op;	/* CRUSH_RULE_* opcode */
	__s32 arg1;
	__s32 arg2;
};

/* step op codes */
enum {
	CRUSH_RULE_NOOP = 0,
	CRUSH_RULE_TAKE = 1,          /* arg1 = value to start with */
	CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
				      /* arg2 = type */
	CRUSH_RULE_CHOOSE_INDEP = 3,  /* same */
	CRUSH_RULE_EMIT = 4,          /* no args */
	CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6,
	CRUSH_RULE_CHOOSE_LEAF_INDEP = 7,
};

/*
 * for specifying choose num (arg1) relative to the max parameter
 * passed to do_rule
 */
#define CRUSH_CHOOSE_N            0
#define CRUSH_CHOOSE_N_MINUS(x)   (-(x))

/*
 * The rule mask is used to describe what the rule is intended for.
 * Given a ruleset and size of output set, we search through the
 * rule list for a matching rule_mask.
 */
struct crush_rule_mask {
	__u8 ruleset;
	__u8 type;
	__u8 min_size;	/* inclusive bounds on matching output set size */
	__u8 max_size;
};

struct crush_rule {
	__u32 len;			/* number of entries in steps[] */
	struct crush_rule_mask mask;
	struct crush_rule_step steps[0];	/* trailing variable-length */
};

/* total allocation size for a rule with (len) steps */
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
			      (len)*sizeof(struct crush_rule_step))



/*
 * A bucket is a named container of other items (either devices or
 * other buckets).  Items within a bucket are chosen using one of a
 * few different algorithms.  The table summarizes how the speed of
 * each option measures up against mapping stability when items are
 * added or removed.
 *
 *  Bucket Alg     Speed       Additions    Removals
 *  ------------------------------------------------
 *  uniform         O(1)       poor         poor
 *  list            O(n)       optimal      poor
 *  tree            O(log n)   good         good
 *  straw           O(n)       optimal      optimal
 */
enum {
	CRUSH_BUCKET_UNIFORM = 1,
	CRUSH_BUCKET_LIST = 2,
	CRUSH_BUCKET_TREE = 3,
	CRUSH_BUCKET_STRAW = 4
};
extern const char *crush_bucket_alg_name(int alg);

struct crush_bucket {
	__s32 id;        /* this'll be negative */
	__u16 type;      /* non-zero; type=0 is reserved for devices */
	__u8 alg;        /* one of CRUSH_BUCKET_* */
	__u8 hash;       /* which hash function to use, CRUSH_HASH_* */
	__u32 weight;    /* 16-bit fixed point */
	__u32 size;      /* num items */
	__s32 *items;    /* member ids: >= 0 devices, < 0 nested buckets */

	/*
	 * cached random permutation: used for uniform bucket and for
	 * the linear search fallback for the other bucket types.
	 */
	__u32 perm_x;  /* @x for which *perm is defined */
	__u32 perm_n;  /* num elements of *perm that are permuted/defined */
	__u32 *perm;
};

struct crush_bucket_uniform {
	struct crush_bucket h;
	__u32 item_weight;  /* 16-bit fixed point; all items equally weighted */
};

struct crush_bucket_list {
	struct crush_bucket h;
	__u32 *item_weights;  /* 16-bit fixed point */
	__u32 *sum_weights;   /* 16-bit fixed point.  element i is sum
				 of weights 0..i, inclusive */
};

struct crush_bucket_tree {
	struct crush_bucket h;  /* note: h.size is _tree_ size, not number of
				   actual items */
	/* NOTE(review): __u8 caps the tree at 255 nodes — confirm maps
	 * never exceed this before growing buckets. */
	__u8 num_nodes;
	__u32 *node_weights;
};

struct crush_bucket_straw {
	struct crush_bucket h;
	__u32 *item_weights;   /* 16-bit fixed point */
	__u32 *straws;         /* 16-bit fixed point */
};



/*
 * CRUSH map includes all buckets, rules, etc.
 */
struct crush_map {
	struct crush_bucket **buckets;	/* indexed by -1-bucket_id */
	struct crush_rule **rules;

	/*
	 * Parent pointers to identify the parent bucket of a device or
	 * bucket in the hierarchy.  If an item appears more than
	 * once, this is the _last_ time it appeared (where buckets
	 * are processed in bucket id order, from -1 on down to
	 * -max_buckets).
	 */
	__u32 *bucket_parents;	/* indexed by -1-bucket_id */
	__u32 *device_parents;	/* indexed by device id */

	__s32 max_buckets;
	__u32 max_rules;
	__s32 max_devices;
};


/* crush.c */
extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
extern void crush_calc_parents(struct crush_map *map);
extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
extern void crush_destroy_bucket(struct crush_bucket *b);
extern void crush_destroy(struct crush_map *map);

#endif
diff --git a/fs/ceph/crush/hash.c b/fs/ceph/crush/hash.c
new file mode 100644
index 000000000000..5873aed694bf
--- /dev/null
+++ b/fs/ceph/crush/hash.c
@@ -0,0 +1,149 @@
1
2#include <linux/types.h>
3#include "hash.h"
4
/*
 * Robert Jenkins' function for mixing 32-bit values
 * http://burtleburtle.net/bob/hash/evahash.html
 * a, b = random bits, c = input and output
 *
 * Modifies all three arguments in place; arguments must be lvalues
 * and are evaluated multiple times (do not pass expressions with
 * side effects).
 */
#define crush_hashmix(a, b, c) do {			\
		a = a-b;  a = a-c;  a = a^(c>>13);	\
		b = b-c;  b = b-a;  b = b^(a<<8);	\
		c = c-a;  c = c-b;  c = c^(b>>13);	\
		a = a-b;  a = a-c;  a = a^(c>>12);	\
		b = b-c;  b = b-a;  b = b^(a<<16);	\
		c = c-a;  c = c-b;  c = c^(b>>5);	\
		a = a-b;  a = a-c;  a = a^(c>>3);	\
		b = b-c;  b = b-a;  b = b^(a<<10);	\
		c = c-a;  c = c-b;  c = c^(b>>15);	\
	} while (0)

/* fixed seed xor'd into every hash; changing it changes all mappings */
#define crush_hash_seed 1315423911
23
/* Hash one 32-bit input; b/x/y are fixed pad values fed to the mixer. */
static __u32 crush_hash32_rjenkins1(__u32 a)
{
	__u32 hash = crush_hash_seed ^ a;
	__u32 b = a;
	__u32 x = 231232;
	__u32 y = 1232;
	crush_hashmix(b, x, hash);
	crush_hashmix(y, a, hash);
	return hash;
}
34
/* Hash two 32-bit inputs; x/y are fixed pad values.  Mix order matters. */
static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
{
	__u32 hash = crush_hash_seed ^ a ^ b;
	__u32 x = 231232;
	__u32 y = 1232;
	crush_hashmix(a, b, hash);
	crush_hashmix(x, a, hash);
	crush_hashmix(b, y, hash);
	return hash;
}
45
/* Hash three 32-bit inputs; x/y are fixed pad values. */
static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
{
	__u32 hash = crush_hash_seed ^ a ^ b ^ c;
	__u32 x = 231232;
	__u32 y = 1232;
	crush_hashmix(a, b, hash);
	crush_hashmix(c, x, hash);
	crush_hashmix(y, a, hash);
	crush_hashmix(b, x, hash);
	crush_hashmix(y, c, hash);
	return hash;
}
58
/* Hash four 32-bit inputs; x/y are fixed pad values. */
static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
{
	__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
	__u32 x = 231232;
	__u32 y = 1232;
	crush_hashmix(a, b, hash);
	crush_hashmix(c, d, hash);
	crush_hashmix(a, x, hash);
	crush_hashmix(y, b, hash);
	crush_hashmix(c, x, hash);
	crush_hashmix(y, d, hash);
	return hash;
}
72
/* Hash five 32-bit inputs; x/y are fixed pad values. */
static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
				      __u32 e)
{
	__u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
	__u32 x = 231232;
	__u32 y = 1232;
	crush_hashmix(a, b, hash);
	crush_hashmix(c, d, hash);
	crush_hashmix(e, x, hash);
	crush_hashmix(y, a, hash);
	crush_hashmix(b, x, hash);
	crush_hashmix(y, c, hash);
	crush_hashmix(d, x, hash);
	crush_hashmix(y, e, hash);
	return hash;
}
89
90
91__u32 crush_hash32(int type, __u32 a)
92{
93 switch (type) {
94 case CRUSH_HASH_RJENKINS1:
95 return crush_hash32_rjenkins1(a);
96 default:
97 return 0;
98 }
99}
100
101__u32 crush_hash32_2(int type, __u32 a, __u32 b)
102{
103 switch (type) {
104 case CRUSH_HASH_RJENKINS1:
105 return crush_hash32_rjenkins1_2(a, b);
106 default:
107 return 0;
108 }
109}
110
111__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
112{
113 switch (type) {
114 case CRUSH_HASH_RJENKINS1:
115 return crush_hash32_rjenkins1_3(a, b, c);
116 default:
117 return 0;
118 }
119}
120
121__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
122{
123 switch (type) {
124 case CRUSH_HASH_RJENKINS1:
125 return crush_hash32_rjenkins1_4(a, b, c, d);
126 default:
127 return 0;
128 }
129}
130
131__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
132{
133 switch (type) {
134 case CRUSH_HASH_RJENKINS1:
135 return crush_hash32_rjenkins1_5(a, b, c, d, e);
136 default:
137 return 0;
138 }
139}
140
141const char *crush_hash_name(int type)
142{
143 switch (type) {
144 case CRUSH_HASH_RJENKINS1:
145 return "rjenkins1";
146 default:
147 return "unknown";
148 }
149}
diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h
new file mode 100644
index 000000000000..ff48e110e4bb
--- /dev/null
+++ b/fs/ceph/crush/hash.h
@@ -0,0 +1,17 @@
#ifndef _CRUSH_HASH_H
#define _CRUSH_HASH_H

/* Supported hash function ids (stored in crush_bucket.hash). */
#define CRUSH_HASH_RJENKINS1   0

#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1

/* Human-readable name for a hash id; "unknown" if unrecognized. */
extern const char *crush_hash_name(int type);

/* 32-bit hashes of one to five 32-bit inputs; return 0 for unknown @type. */
extern __u32 crush_hash32(int type, __u32 a);
extern __u32 crush_hash32_2(int type, __u32 a, __u32 b);
extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d,
			    __u32 e);

#endif
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c
new file mode 100644
index 000000000000..9ba54efb6543
--- /dev/null
+++ b/fs/ceph/crush/mapper.c
@@ -0,0 +1,596 @@
1
2#ifdef __KERNEL__
3# include <linux/string.h>
4# include <linux/slab.h>
5# include <linux/bug.h>
6# include <linux/kernel.h>
7# ifndef dprintk
8# define dprintk(args...)
9# endif
10#else
11# include <string.h>
12# include <stdio.h>
13# include <stdlib.h>
14# include <assert.h>
15# define BUG_ON(x) assert(!(x))
16# define dprintk(args...) /* printf(args) */
17# define kmalloc(x, f) malloc(x)
18# define kfree(x) free(x)
19#endif
20
21#include "crush.h"
22#include "hash.h"
23
24/*
25 * Implement the core CRUSH mapping algorithm.
26 */
27
28/**
29 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
30 * @map: the crush_map
31 * @ruleset: the storage ruleset id (user defined)
32 * @type: storage ruleset type (user defined)
33 * @size: output set size
34 */
35int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
36{
37 int i;
38
39 for (i = 0; i < map->max_rules; i++) {
40 if (map->rules[i] &&
41 map->rules[i]->mask.ruleset == ruleset &&
42 map->rules[i]->mask.type == type &&
43 map->rules[i]->mask.min_size <= size &&
44 map->rules[i]->mask.max_size >= size)
45 return i;
46 }
47 return -1;
48}
49
50
51/*
52 * bucket choose methods
53 *
54 * For each bucket algorithm, we have a "choose" method that, given a
55 * crush input @x and replica position (usually, position in output set) @r,
56 * will produce an item in the bucket.
57 */
58
59/*
60 * Choose based on a random permutation of the bucket.
61 *
62 * We used to use some prime number arithmetic to do this, but it
63 * wasn't very random, and had some other bad behaviors. Instead, we
64 * calculate an actual random permutation of the bucket members.
65 * Since this is expensive, we optimize for the r=0 case, which
66 * captures the vast majority of calls.
67 */
static int bucket_perm_choose(struct crush_bucket *bucket,
			      int x, int r)
{
	/*
	 * Incremental Fisher-Yates-style shuffle memoized in
	 * bucket->perm: only the first perm_n entries are valid.
	 * NOTE(review): assumes bucket->size != 0 (callers check before
	 * calling) — the modulo below would otherwise divide by zero.
	 */
	unsigned pr = r % bucket->size;
	unsigned i, s;

	/* start a new permutation if @x has changed */
	if (bucket->perm_x != x || bucket->perm_n == 0) {
		dprintk("bucket %d new x=%d\n", bucket->id, x);
		bucket->perm_x = x;

		/* optimize common r=0 case */
		if (pr == 0) {
			s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
				bucket->size;
			bucket->perm[0] = s;
			bucket->perm_n = 0xffff; /* magic value, see below */
			goto out;
		}

		for (i = 0; i < bucket->size; i++)
			bucket->perm[i] = i;
		bucket->perm_n = 0;
	} else if (bucket->perm_n == 0xffff) {
		/* clean up after the r=0 case above: rebuild the identity
		 * permutation with the previously chosen first element
		 * swapped into place */
		for (i = 1; i < bucket->size; i++)
			bucket->perm[i] = i;
		bucket->perm[bucket->perm[0]] = 0;
		bucket->perm_n = 1;
	}

	/* calculate permutation up to pr */
	for (i = 0; i < bucket->perm_n; i++)
		dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
	while (bucket->perm_n <= pr) {
		unsigned p = bucket->perm_n;
		/* no point in swapping the final entry */
		if (p < bucket->size - 1) {
			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
				(bucket->size - p);
			if (i) {
				unsigned t = bucket->perm[p + i];
				bucket->perm[p + i] = bucket->perm[p];
				bucket->perm[p] = t;
			}
			dprintk(" perm_choose swap %d with %d\n", p, p+i);
		}
		bucket->perm_n++;
	}
	for (i = 0; i < bucket->size; i++)
		dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]);

	s = bucket->perm[pr];
out:
	dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
		bucket->size, x, r, pr, s);
	return bucket->items[s];
}
126
127/* uniform */
128static int bucket_uniform_choose(struct crush_bucket_uniform *bucket,
129 int x, int r)
130{
131 return bucket_perm_choose(&bucket->h, x, r);
132}
133
134/* list */
135static int bucket_list_choose(struct crush_bucket_list *bucket,
136 int x, int r)
137{
138 int i;
139
140 for (i = bucket->h.size-1; i >= 0; i--) {
141 __u64 w = crush_hash32_4(bucket->h.hash,x, bucket->h.items[i],
142 r, bucket->h.id);
143 w &= 0xffff;
144 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
145 "sw %x rand %llx",
146 i, x, r, bucket->h.items[i], bucket->item_weights[i],
147 bucket->sum_weights[i], w);
148 w *= bucket->sum_weights[i];
149 w = w >> 16;
150 /*dprintk(" scaled %llx\n", w);*/
151 if (w < bucket->item_weights[i])
152 return bucket->h.items[i];
153 }
154
155 BUG_ON(1);
156 return 0;
157}
158
159
160/* (binary) tree */
static int height(int n)
{
	/*
	 * Height of node @n in the implicit binary tree: the number of
	 * trailing zero bits in its index.  Leaves (odd indices) are at
	 * height 0.
	 */
	int h;

	for (h = 0; (n & 1) == 0; n >>= 1)
		h++;
	return h;
}
170
static int left(int x)
{
	/* Index of the left child of interior node @x. */
	return x - (1 << (height(x) - 1));
}
176
static int right(int x)
{
	/* Index of the right child of interior node @x. */
	return x + (1 << (height(x) - 1));
}
182
static int terminal(int x)
{
	/* Odd indices are leaves of the implicit tree. */
	return (x & 1) != 0;
}
187
188static int bucket_tree_choose(struct crush_bucket_tree *bucket,
189 int x, int r)
190{
191 int n, l;
192 __u32 w;
193 __u64 t;
194
195 /* start at root */
196 n = bucket->num_nodes >> 1;
197
198 while (!terminal(n)) {
199 /* pick point in [0, w) */
200 w = bucket->node_weights[n];
201 t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
202 bucket->h.id) * (__u64)w;
203 t = t >> 32;
204
205 /* descend to the left or right? */
206 l = left(n);
207 if (t < bucket->node_weights[l])
208 n = l;
209 else
210 n = right(n);
211 }
212
213 return bucket->h.items[n >> 1];
214}
215
216
217/* straw */
218
219static int bucket_straw_choose(struct crush_bucket_straw *bucket,
220 int x, int r)
221{
222 int i;
223 int high = 0;
224 __u64 high_draw = 0;
225 __u64 draw;
226
227 for (i = 0; i < bucket->h.size; i++) {
228 draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
229 draw &= 0xffff;
230 draw *= bucket->straws[i];
231 if (i == 0 || draw > high_draw) {
232 high = i;
233 high_draw = draw;
234 }
235 }
236 return bucket->h.items[high];
237}
238
239static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
240{
241 dprintk("choose %d x=%d r=%d\n", in->id, x, r);
242 switch (in->alg) {
243 case CRUSH_BUCKET_UNIFORM:
244 return bucket_uniform_choose((struct crush_bucket_uniform *)in,
245 x, r);
246 case CRUSH_BUCKET_LIST:
247 return bucket_list_choose((struct crush_bucket_list *)in,
248 x, r);
249 case CRUSH_BUCKET_TREE:
250 return bucket_tree_choose((struct crush_bucket_tree *)in,
251 x, r);
252 case CRUSH_BUCKET_STRAW:
253 return bucket_straw_choose((struct crush_bucket_straw *)in,
254 x, r);
255 default:
256 BUG_ON(1);
257 return in->items[0];
258 }
259}
260
261/*
262 * true if device is marked "out" (failed, fully offloaded)
263 * of the cluster
264 */
265static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
266{
267 if (weight[item] >= 0x1000)
268 return 0;
269 if (weight[item] == 0)
270 return 1;
271 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
272 < weight[item])
273 return 0;
274 return 1;
275}
276
277/**
278 * crush_choose - choose numrep distinct items of given type
279 * @map: the crush_map
280 * @bucket: the bucket we are choose an item from
281 * @x: crush input value
282 * @numrep: the number of items to choose
283 * @type: the type of item to choose
284 * @out: pointer to output vector
285 * @outpos: our position in that vector
286 * @firstn: true if choosing "first n" items, false if choosing "indep"
287 * @recurse_to_leaf: true if we want one device under each item of given type
288 * @out2: second output vector for leaf items (if @recurse_to_leaf)
289 */
static int crush_choose(struct crush_map *map,
			struct crush_bucket *bucket,
			__u32 *weight,
			int x, int numrep, int type,
			int *out, int outpos,
			int firstn, int recurse_to_leaf,
			int *out2)
{
	int rep;
	int ftotal, flocal;	/* total / per-bucket failure counts */
	int retry_descent, retry_bucket, skip_rep;
	struct crush_bucket *in = bucket;
	int r;
	int i;
	int item = 0;
	int itemtype;
	int collide, reject;
	const int orig_tries = 5; /* attempts before we fall back to search */
	dprintk("choose bucket %d x %d outpos %d\n", bucket->id, x, outpos);

	for (rep = outpos; rep < numrep; rep++) {
		/* keep trying until we get a non-out, non-colliding item */
		ftotal = 0;
		skip_rep = 0;
		do {
			retry_descent = 0;
			in = bucket; /* initial bucket */

			/* choose through intervening buckets */
			flocal = 0;
			do {
				collide = 0;
				retry_bucket = 0;
				r = rep;
				/* perturb the replica rank r by the failure
				 * counts so retries pick different items */
				if (in->alg == CRUSH_BUCKET_UNIFORM) {
					/* be careful */
					if (firstn || numrep >= in->size)
						/* r' = r + f_total */
						r += ftotal;
					else if (in->size % numrep == 0)
						/* r'=r+(n+1)*f_local */
						r += (numrep+1) *
							(flocal+ftotal);
					else
						/* r' = r + n*f_local */
						r += numrep * (flocal+ftotal);
				} else {
					if (firstn)
						/* r' = r + f_total */
						r += ftotal;
					else
						/* r' = r + n*f_local */
						r += numrep * (flocal+ftotal);
				}

				/* bucket choose */
				if (in->size == 0) {
					reject = 1;
					goto reject;
				}
				/* after enough local failures, switch to the
				 * exhaustive permutation-based search */
				if (flocal >= (in->size>>1) &&
				    flocal > orig_tries)
					item = bucket_perm_choose(in, x, r);
				else
					item = crush_bucket_choose(in, x, r);
				BUG_ON(item >= map->max_devices);

				/* desired type? */
				if (item < 0)
					itemtype = map->buckets[-1-item]->type;
				else
					itemtype = 0;	/* device */
				dprintk(" item %d type %d\n", item, itemtype);

				/* keep going? */
				if (itemtype != type) {
					BUG_ON(item >= 0 ||
					       (-1-item) >= map->max_buckets);
					/* descend into the chosen bucket */
					in = map->buckets[-1-item];
					continue;
				}

				/* collision? */
				for (i = 0; i < outpos; i++) {
					if (out[i] == item) {
						collide = 1;
						break;
					}
				}

				/* for choose_leaf, also require that a live
				 * device exists beneath the chosen bucket */
				if (recurse_to_leaf &&
				    item < 0 &&
				    crush_choose(map, map->buckets[-1-item],
						 weight,
						 x, outpos+1, 0,
						 out2, outpos,
						 firstn, 0, NULL) <= outpos) {
					reject = 1;
				} else {
					/* out? */
					if (itemtype == 0)
						reject = is_out(map, weight,
								item, x);
					else
						reject = 0;
				}

reject:
				if (reject || collide) {
					ftotal++;
					flocal++;

					if (collide && flocal < 3)
						/* retry locally a few times */
						retry_bucket = 1;
					else if (flocal < in->size + orig_tries)
						/* exhaustive bucket search */
						retry_bucket = 1;
					else if (ftotal < 20)
						/* then retry descent */
						retry_descent = 1;
					else
						/* else give up */
						skip_rep = 1;
					dprintk(" reject %d collide %d "
						"ftotal %d flocal %d\n",
						reject, collide, ftotal,
						flocal);
				}
			} while (retry_bucket);
		} while (retry_descent);

		if (skip_rep) {
			dprintk("skip rep\n");
			continue;
		}

		dprintk("choose got %d\n", item);
		out[outpos] = item;
		outpos++;
	}

	dprintk("choose returns %d\n", outpos);
	return outpos;
}
435
436
437/**
438 * crush_do_rule - calculate a mapping with the given input and rule
439 * @map: the crush_map
440 * @ruleno: the rule id
441 * @x: hash input
442 * @result: pointer to result vector
443 * @result_max: maximum result size
444 * @force: force initial replica choice; -1 for none
445 */
int crush_do_rule(struct crush_map *map,
		  int ruleno, int x, int *result, int result_max,
		  int force, __u32 *weight)
{
	int result_len;
	/* chain of ancestors of the forced device, root last popped.
	 * NOTE(review): no bound check against CRUSH_MAX_DEPTH here —
	 * assumes hierarchy depth <= 10; confirm for deep maps. */
	int force_context[CRUSH_MAX_DEPTH];
	int force_pos = -1;
	int a[CRUSH_MAX_SET];	/* scratch working vectors */
	int b[CRUSH_MAX_SET];
	int c[CRUSH_MAX_SET];	/* leaf results for choose_leaf steps */
	int recurse_to_leaf;
	int *w;			/* current working set */
	int wsize = 0;
	int *o;			/* output set being built */
	int osize;
	int *tmp;
	struct crush_rule *rule;
	int step;
	int i, j;
	int numrep;
	int firstn;
	int rc = -1;

	BUG_ON(ruleno >= map->max_rules);

	rule = map->rules[ruleno];
	result_len = 0;
	w = a;
	o = b;

	/*
	 * determine hierarchical context of force, if any.  note
	 * that this may or may not correspond to the specific types
	 * referenced by the crush rule.
	 */
	if (force >= 0) {
		if (force >= map->max_devices ||
		    map->device_parents[force] == 0) {
			/*dprintk("CRUSH: forcefed device dne\n");*/
			rc = -1;  /* force fed device dne */
			goto out;
		}
		if (!is_out(map, weight, force, x)) {
			/* record the device and each enclosing bucket,
			 * walking up until the hierarchy root (parent 0) */
			while (1) {
				force_context[++force_pos] = force;
				if (force >= 0)
					force = map->device_parents[force];
				else
					force = map->bucket_parents[-1-force];
				if (force == 0)
					break;
			}
		}
	}

	for (step = 0; step < rule->len; step++) {
		firstn = 0;
		switch (rule->steps[step].op) {
		case CRUSH_RULE_TAKE:
			/* start the working set at the named bucket */
			w[0] = rule->steps[step].arg1;
			if (force_pos >= 0) {
				BUG_ON(force_context[force_pos] != w[0]);
				force_pos--;
			}
			wsize = 1;
			break;

		case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
		case CRUSH_RULE_CHOOSE_FIRSTN:
			firstn = 1;
			/* fall through */
		case CRUSH_RULE_CHOOSE_LEAF_INDEP:
		case CRUSH_RULE_CHOOSE_INDEP:
			BUG_ON(wsize == 0);

			recurse_to_leaf =
				rule->steps[step].op ==
				 CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
				rule->steps[step].op ==
				CRUSH_RULE_CHOOSE_LEAF_INDEP;

			/* reset output */
			osize = 0;

			for (i = 0; i < wsize; i++) {
				/*
				 * see CRUSH_N, CRUSH_N_MINUS macros.
				 * basically, numrep <= 0 means relative to
				 * the provided result_max
				 */
				numrep = rule->steps[step].arg1;
				if (numrep <= 0) {
					numrep += result_max;
					if (numrep <= 0)
						continue;
				}
				j = 0;
				/* seed the output with the forced item's
				 * ancestor of the requested type, if any */
				if (osize == 0 && force_pos >= 0) {
					/* skip any intermediate types */
					while (force_pos &&
					       force_context[force_pos] < 0 &&
					       rule->steps[step].arg2 !=
					       map->buckets[-1 -
					       force_context[force_pos]]->type)
						force_pos--;
					o[osize] = force_context[force_pos];
					if (recurse_to_leaf)
						c[osize] = force_context[0];
					j++;
					force_pos--;
				}
				osize += crush_choose(map,
						      map->buckets[-1-w[i]],
						      weight,
						      x, numrep,
						      rule->steps[step].arg2,
						      o+osize, j,
						      firstn,
						      recurse_to_leaf, c+osize);
			}

			if (recurse_to_leaf)
				/* copy final _leaf_ values to output set */
				memcpy(o, c, osize*sizeof(*o));

			/* swap t and w arrays */
			tmp = o;
			o = w;
			w = tmp;
			wsize = osize;
			break;


		case CRUSH_RULE_EMIT:
			/* append the working set to the caller's result */
			for (i = 0; i < wsize && result_len < result_max; i++) {
				result[result_len] = w[i];
				result_len++;
			}
			wsize = 0;
			break;

		default:
			BUG_ON(1);
		}
	}
	rc = result_len;

out:
	return rc;
}
595
596
diff --git a/fs/ceph/crush/mapper.h b/fs/ceph/crush/mapper.h
new file mode 100644
index 000000000000..98e90046fd9f
--- /dev/null
+++ b/fs/ceph/crush/mapper.h
@@ -0,0 +1,20 @@
#ifndef _CRUSH_MAPPER_H
#define _CRUSH_MAPPER_H

/*
 * CRUSH functions for finding rules and then mapping an input to an
 * output set.
 *
 * LGPL2
 */

#include "crush.h"

/* Find the id of the first rule matching (pool, type, size); -1 if none. */
extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
/*
 * Map input @x through rule @ruleno into @result (at most @result_max
 * entries).  Returns the number of entries written, or -1 on error.
 */
extern int crush_do_rule(struct crush_map *map,
			 int ruleno,
			 int x, int *result, int result_max,
			 int forcefeed,    /* -1 for none */
			 __u32 *weights);

#endif
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
new file mode 100644
index 000000000000..291ac288e791
--- /dev/null
+++ b/fs/ceph/crypto.c
@@ -0,0 +1,408 @@
1
2#include "ceph_debug.h"
3
4#include <linux/err.h>
5#include <linux/scatterlist.h>
6#include <crypto/hash.h>
7
8#include "crypto.h"
9#include "decode.h"
10
11int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
12{
13 if (*p + sizeof(u16) + sizeof(key->created) +
14 sizeof(u16) + key->len > end)
15 return -ERANGE;
16 ceph_encode_16(p, key->type);
17 ceph_encode_copy(p, &key->created, sizeof(key->created));
18 ceph_encode_16(p, key->len);
19 ceph_encode_copy(p, key->key, key->len);
20 return 0;
21}
22
/*
 * Decode a crypto key from the buffer at *p (advancing *p), the
 * inverse of ceph_crypto_key_encode.  On success key->key points to a
 * freshly kmalloc'd copy of the key bytes (caller owns it; see
 * ceph_crypto_key_destroy).  Returns 0, -ENOMEM, or -EINVAL on a
 * truncated buffer.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	/* fixed-size header: type (u16), created, len (u16) */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	/* re-check bounds now that we know the variable-length part */
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
40
/*
 * Decode an ASCII-armored key string (base64-style; see armor.c) into
 * *key.  Binary size is at most 3/4 of the armored length, so the
 * temporary buffer is sized accordingly and freed before returning.
 * Returns 0 or a negative errno from unarmoring/decoding.
 */
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}
67
68
69
70#define AES_KEY_SIZE 16
71
/* allocate a kernel crypto API transform for AES in CBC mode */
static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
76
/* fixed, well-known CBC IV shared by all ceph AES operations (16 bytes) */
const u8 *aes_iv = "cephsageyudagreg";
78
79int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len,
80 const void *src, size_t src_len)
81{
82 struct scatterlist sg_in[2], sg_out[1];
83 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
84 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
85 int ret;
86 void *iv;
87 int ivsize;
88 size_t zero_padding = (0x10 - (src_len & 0x0f));
89 char pad[16];
90
91 if (IS_ERR(tfm))
92 return PTR_ERR(tfm);
93
94 memset(pad, zero_padding, zero_padding);
95
96 *dst_len = src_len + zero_padding;
97
98 crypto_blkcipher_setkey((void *)tfm, key, key_len);
99 sg_init_table(sg_in, 2);
100 sg_set_buf(&sg_in[0], src, src_len);
101 sg_set_buf(&sg_in[1], pad, zero_padding);
102 sg_init_table(sg_out, 1);
103 sg_set_buf(sg_out, dst, *dst_len);
104 iv = crypto_blkcipher_crt(tfm)->iv;
105 ivsize = crypto_blkcipher_ivsize(tfm);
106
107 memcpy(iv, aes_iv, ivsize);
108 /*
109 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
110 key, key_len, 1);
111 print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
112 src, src_len, 1);
113 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
114 pad, zero_padding, 1);
115 */
116 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
117 src_len + zero_padding);
118 crypto_free_blkcipher(tfm);
119 if (ret < 0)
120 pr_err("ceph_aes_crypt failed %d\n", ret);
121 /*
122 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
123 dst, *dst_len, 1);
124 */
125 return 0;
126}
127
128int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len,
129 const void *src1, size_t src1_len,
130 const void *src2, size_t src2_len)
131{
132 struct scatterlist sg_in[3], sg_out[1];
133 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
134 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
135 int ret;
136 void *iv;
137 int ivsize;
138 size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
139 char pad[16];
140
141 if (IS_ERR(tfm))
142 return PTR_ERR(tfm);
143
144 memset(pad, zero_padding, zero_padding);
145
146 *dst_len = src1_len + src2_len + zero_padding;
147
148 crypto_blkcipher_setkey((void *)tfm, key, key_len);
149 sg_init_table(sg_in, 3);
150 sg_set_buf(&sg_in[0], src1, src1_len);
151 sg_set_buf(&sg_in[1], src2, src2_len);
152 sg_set_buf(&sg_in[2], pad, zero_padding);
153 sg_init_table(sg_out, 1);
154 sg_set_buf(sg_out, dst, *dst_len);
155 iv = crypto_blkcipher_crt(tfm)->iv;
156 ivsize = crypto_blkcipher_ivsize(tfm);
157
158 memcpy(iv, aes_iv, ivsize);
159 /*
160 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
161 key, key_len, 1);
162 print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
163 src1, src1_len, 1);
164 print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
165 src2, src2_len, 1);
166 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
167 pad, zero_padding, 1);
168 */
169 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
170 src1_len + src2_len + zero_padding);
171 crypto_free_blkcipher(tfm);
172 if (ret < 0)
173 pr_err("ceph_aes_crypt2 failed %d\n", ret);
174 /*
175 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
176 dst, *dst_len, 1);
177 */
178 return 0;
179}
180
181int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len,
182 const void *src, size_t src_len)
183{
184 struct scatterlist sg_in[1], sg_out[2];
185 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
186 struct blkcipher_desc desc = { .tfm = tfm };
187 char pad[16];
188 void *iv;
189 int ivsize;
190 int ret;
191 int last_byte;
192
193 if (IS_ERR(tfm))
194 return PTR_ERR(tfm);
195
196 crypto_blkcipher_setkey((void *)tfm, key, key_len);
197 sg_init_table(sg_in, 1);
198 sg_init_table(sg_out, 2);
199 sg_set_buf(sg_in, src, src_len);
200 sg_set_buf(&sg_out[0], dst, *dst_len);
201 sg_set_buf(&sg_out[1], pad, sizeof(pad));
202
203 iv = crypto_blkcipher_crt(tfm)->iv;
204 ivsize = crypto_blkcipher_ivsize(tfm);
205
206 memcpy(iv, aes_iv, ivsize);
207
208 /*
209 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
210 key, key_len, 1);
211 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
212 src, src_len, 1);
213 */
214
215 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
216 crypto_free_blkcipher(tfm);
217 if (ret < 0) {
218 pr_err("ceph_aes_decrypt failed %d\n", ret);
219 return ret;
220 }
221
222 if (src_len <= *dst_len)
223 last_byte = ((char *)dst)[src_len - 1];
224 else
225 last_byte = pad[src_len - *dst_len - 1];
226 if (last_byte <= 16 && src_len >= last_byte) {
227 *dst_len = src_len - last_byte;
228 } else {
229 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
230 last_byte, (int)src_len);
231 return -EPERM; /* bad padding */
232 }
233 /*
234 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
235 dst, *dst_len, 1);
236 */
237 return 0;
238}
239
240int ceph_aes_decrypt2(const void *key, int key_len,
241 void *dst1, size_t *dst1_len,
242 void *dst2, size_t *dst2_len,
243 const void *src, size_t src_len)
244{
245 struct scatterlist sg_in[1], sg_out[3];
246 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
247 struct blkcipher_desc desc = { .tfm = tfm };
248 char pad[16];
249 void *iv;
250 int ivsize;
251 int ret;
252 int last_byte;
253
254 if (IS_ERR(tfm))
255 return PTR_ERR(tfm);
256
257 sg_init_table(sg_in, 1);
258 sg_set_buf(sg_in, src, src_len);
259 sg_init_table(sg_out, 3);
260 sg_set_buf(&sg_out[0], dst1, *dst1_len);
261 sg_set_buf(&sg_out[1], dst2, *dst2_len);
262 sg_set_buf(&sg_out[2], pad, sizeof(pad));
263
264 crypto_blkcipher_setkey((void *)tfm, key, key_len);
265 iv = crypto_blkcipher_crt(tfm)->iv;
266 ivsize = crypto_blkcipher_ivsize(tfm);
267
268 memcpy(iv, aes_iv, ivsize);
269
270 /*
271 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
272 key, key_len, 1);
273 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
274 src, src_len, 1);
275 */
276
277 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
278 crypto_free_blkcipher(tfm);
279 if (ret < 0) {
280 pr_err("ceph_aes_decrypt failed %d\n", ret);
281 return ret;
282 }
283
284 if (src_len <= *dst1_len)
285 last_byte = ((char *)dst1)[src_len - 1];
286 else if (src_len <= *dst1_len + *dst2_len)
287 last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
288 else
289 last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
290 if (last_byte <= 16 && src_len >= last_byte) {
291 src_len -= last_byte;
292 } else {
293 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
294 last_byte, (int)src_len);
295 return -EPERM; /* bad padding */
296 }
297
298 if (src_len < *dst1_len) {
299 *dst1_len = src_len;
300 *dst2_len = 0;
301 } else {
302 *dst2_len = src_len - *dst1_len;
303 }
304 /*
305 print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
306 dst1, *dst1_len, 1);
307 print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
308 dst2, *dst2_len, 1);
309 */
310
311 return 0;
312}
313
314
315int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
316 const void *src, size_t src_len)
317{
318 switch (secret->type) {
319 case CEPH_CRYPTO_NONE:
320 if (*dst_len < src_len)
321 return -ERANGE;
322 memcpy(dst, src, src_len);
323 *dst_len = src_len;
324 return 0;
325
326 case CEPH_CRYPTO_AES:
327 return ceph_aes_decrypt(secret->key, secret->len, dst,
328 dst_len, src, src_len);
329
330 default:
331 return -EINVAL;
332 }
333}
334
335int ceph_decrypt2(struct ceph_crypto_key *secret,
336 void *dst1, size_t *dst1_len,
337 void *dst2, size_t *dst2_len,
338 const void *src, size_t src_len)
339{
340 size_t t;
341
342 switch (secret->type) {
343 case CEPH_CRYPTO_NONE:
344 if (*dst1_len + *dst2_len < src_len)
345 return -ERANGE;
346 t = min(*dst1_len, src_len);
347 memcpy(dst1, src, t);
348 *dst1_len = t;
349 src += t;
350 src_len -= t;
351 if (src_len) {
352 t = min(*dst2_len, src_len);
353 memcpy(dst2, src, t);
354 *dst2_len = t;
355 }
356 return 0;
357
358 case CEPH_CRYPTO_AES:
359 return ceph_aes_decrypt2(secret->key, secret->len,
360 dst1, dst1_len, dst2, dst2_len,
361 src, src_len);
362
363 default:
364 return -EINVAL;
365 }
366}
367
368int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
369 const void *src, size_t src_len)
370{
371 switch (secret->type) {
372 case CEPH_CRYPTO_NONE:
373 if (*dst_len < src_len)
374 return -ERANGE;
375 memcpy(dst, src, src_len);
376 *dst_len = src_len;
377 return 0;
378
379 case CEPH_CRYPTO_AES:
380 return ceph_aes_encrypt(secret->key, secret->len, dst,
381 dst_len, src, src_len);
382
383 default:
384 return -EINVAL;
385 }
386}
387
388int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
389 const void *src1, size_t src1_len,
390 const void *src2, size_t src2_len)
391{
392 switch (secret->type) {
393 case CEPH_CRYPTO_NONE:
394 if (*dst_len < src1_len + src2_len)
395 return -ERANGE;
396 memcpy(dst, src1, src1_len);
397 memcpy(dst + src1_len, src2, src2_len);
398 *dst_len = src1_len + src2_len;
399 return 0;
400
401 case CEPH_CRYPTO_AES:
402 return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
403 src1, src1_len, src2, src2_len);
404
405 default:
406 return -EINVAL;
407 }
408}
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
new file mode 100644
index 000000000000..40b502e6bd89
--- /dev/null
+++ b/fs/ceph/crypto.h
@@ -0,0 +1,48 @@
1#ifndef _FS_CEPH_CRYPTO_H
2#define _FS_CEPH_CRYPTO_H
3
4#include "types.h"
5#include "buffer.h"
6
/*
 * cryptographic secret: cipher type (CEPH_CRYPTO_*), creation time,
 * and raw key bytes.  key is kmalloc'd (by ceph_crypto_key_decode)
 * and owned by this struct.
 */
struct ceph_crypto_key {
	int type;
	struct ceph_timespec created;
	int len;
	void *key;
};

/* free the key material; does not free the struct itself */
static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
	kfree(key->key);
}
21
22extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
23 void **p, void *end);
24extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
25 void **p, void *end);
26extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
27
28/* crypto.c */
29extern int ceph_decrypt(struct ceph_crypto_key *secret,
30 void *dst, size_t *dst_len,
31 const void *src, size_t src_len);
32extern int ceph_encrypt(struct ceph_crypto_key *secret,
33 void *dst, size_t *dst_len,
34 const void *src, size_t src_len);
35extern int ceph_decrypt2(struct ceph_crypto_key *secret,
36 void *dst1, size_t *dst1_len,
37 void *dst2, size_t *dst2_len,
38 const void *src, size_t src_len);
39extern int ceph_encrypt2(struct ceph_crypto_key *secret,
40 void *dst, size_t *dst_len,
41 const void *src1, size_t src1_len,
42 const void *src2, size_t src2_len);
43
44/* armor.c */
45extern int ceph_armor(char *dst, const void *src, const void *end);
46extern int ceph_unarmor(void *dst, const char *src, const char *end);
47
48#endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
new file mode 100644
index 000000000000..e159f1415110
--- /dev/null
+++ b/fs/ceph/debugfs.c
@@ -0,0 +1,483 @@
1#include "ceph_debug.h"
2
3#include <linux/device.h>
4#include <linux/module.h>
5#include <linux/ctype.h>
6#include <linux/debugfs.h>
7#include <linux/seq_file.h>
8
9#include "super.h"
10#include "mds_client.h"
11#include "mon_client.h"
12#include "auth.h"
13
14#ifdef CONFIG_DEBUG_FS
15
16/*
17 * Implement /sys/kernel/debug/ceph fun
18 *
19 * /sys/kernel/debug/ceph/client* - an instance of the ceph client
20 * .../osdmap - current osdmap
21 * .../mdsmap - current mdsmap
22 * .../monmap - current monmap
23 * .../osdc - active osd requests
24 * .../mdsc - active mds requests
25 * .../monc - mon client state
26 * .../dentry_lru - dump contents of dentry lru
27 * .../caps - expose cap (reservation) stats
28 * .../bdi - symlink to ../../bdi/something
29 */
30
/* top-level /sys/kernel/debug/ceph directory, created at module init */
static struct dentry *ceph_debugfs_dir;
32
/*
 * monmap debugfs file: print the monmap epoch and each monitor's
 * entity name and address.  Prints nothing if no monmap has been
 * received yet.
 */
static int monmap_show(struct seq_file *s, void *p)
{
	int i;
	struct ceph_client *client = s->private;

	if (client->monc.monmap == NULL)
		return 0;

	seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
	for (i = 0; i < client->monc.monmap->num_mon; i++) {
		struct ceph_entity_inst *inst =
			&client->monc.monmap->mon_inst[i];

		seq_printf(s, "\t%s%lld\t%s\n",
			   ENTITY_NAME(inst->name),
			   pr_addr(&inst->addr.in_addr));
	}
	return 0;
}
52
/*
 * mdsmap debugfs file: print mdsmap epoch, root, session timeouts,
 * then one line per MDS slot with its address and state name.
 */
static int mdsmap_show(struct seq_file *s, void *p)
{
	int i;
	struct ceph_client *client = s->private;

	if (client->mdsc.mdsmap == NULL)
		return 0;
	seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch);
	seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root);
	seq_printf(s, "session_timeout %d\n",
		   client->mdsc.mdsmap->m_session_timeout);
	seq_printf(s, "session_autoclose %d\n",
		   client->mdsc.mdsmap->m_session_autoclose);
	for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) {
		struct ceph_entity_addr *addr =
			&client->mdsc.mdsmap->m_info[i].addr;
		int state = client->mdsc.mdsmap->m_info[i].state;

		seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr),
			   ceph_mds_state_name(state));
	}
	return 0;
}
76
/*
 * osdmap debugfs file: print osdmap epoch and flags, one line per
 * placement-group pool, then one line per OSD with address, weight
 * (16.16 fixed point shown as a percentage) and state string.
 */
static int osdmap_show(struct seq_file *s, void *p)
{
	int i;
	struct ceph_client *client = s->private;
	struct rb_node *n;

	if (client->osdc.osdmap == NULL)
		return 0;
	seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
	seq_printf(s, "flags%s%s\n",
		   (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
		   " NEARFULL" : "",
		   (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
		   " FULL" : "");
	for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pool =
			rb_entry(n, struct ceph_pg_pool_info, node);
		seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
			   pool->id, pool->v.pg_num, pool->pg_num_mask,
			   pool->v.lpg_num, pool->lpg_num_mask);
	}
	for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
		struct ceph_entity_addr *addr =
			&client->osdc.osdmap->osd_addr[i];
		int state = client->osdc.osdmap->osd_state[i];
		char sb[64];	/* scratch for the state string */

		seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
			   i, pr_addr(&addr->in_addr),
			   ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
			   ceph_osdmap_state_str(sb, sizeof(sb), state));
	}
	return 0;
}
111
/*
 * monc debugfs file: dump mon client state — which maps we have,
 * whether a new osdmap is wanted, and any pending statfs requests.
 */
static int monc_show(struct seq_file *s, void *p)
{
	struct ceph_client *client = s->private;
	struct ceph_mon_statfs_request *req;
	struct ceph_mon_client *monc = &client->monc;
	struct rb_node *rp;

	mutex_lock(&monc->mutex);

	if (monc->have_mdsmap)
		seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
	if (monc->have_osdmap)
		seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
	if (monc->want_next_osdmap)
		seq_printf(s, "want next osdmap\n");

	/* in-flight statfs requests, by tid */
	for (rp = rb_first(&monc->statfs_request_tree); rp; rp = rb_next(rp)) {
		req = rb_entry(rp, struct ceph_mon_statfs_request, node);
		seq_printf(s, "%lld statfs\n", req->tid);
	}

	mutex_unlock(&monc->mutex);
	return 0;
}
136
/*
 * mdsc debugfs file: one line per in-flight MDS request — tid, target
 * mds, op name, and the inode/dentry/path operand(s) of the op.
 */
static int mdsc_show(struct seq_file *s, void *p)
{
	struct ceph_client *client = s->private;
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	struct rb_node *rp;
	int pathlen;
	u64 pathbase;
	char *path;

	mutex_lock(&mdsc->mutex);
	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
		req = rb_entry(rp, struct ceph_mds_request, r_node);

		if (req->r_request)
			seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds);
		else
			seq_printf(s, "%lld\t(no request)\t", req->r_tid);

		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));

		if (req->r_got_unsafe)
			seq_printf(s, "\t(unsafe)");
		else
			seq_printf(s, "\t");

		/* primary operand: inode, dentry, or raw path */
		if (req->r_inode) {
			seq_printf(s, " #%llx", ceph_ino(req->r_inode));
		} else if (req->r_dentry) {
			path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						    &pathbase, 0);
			/* d_lock keeps d_name stable while we print it */
			spin_lock(&req->r_dentry->d_lock);
			seq_printf(s, " #%llx/%.*s (%s)",
				   ceph_ino(req->r_dentry->d_parent->d_inode),
				   req->r_dentry->d_name.len,
				   req->r_dentry->d_name.name,
				   path ? path : "");
			spin_unlock(&req->r_dentry->d_lock);
			kfree(path);
		} else if (req->r_path1) {
			seq_printf(s, " #%llx/%s", req->r_ino1.ino,
				   req->r_path1);
		}

		/* secondary operand (dentry or path), if the op has one */
		if (req->r_old_dentry) {
			path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
						    &pathbase, 0);
			spin_lock(&req->r_old_dentry->d_lock);
			seq_printf(s, " #%llx/%.*s (%s)",
			       ceph_ino(req->r_old_dentry->d_parent->d_inode),
				   req->r_old_dentry->d_name.len,
				   req->r_old_dentry->d_name.name,
				   path ? path : "");
			spin_unlock(&req->r_old_dentry->d_lock);
			kfree(path);
		} else if (req->r_path2) {
			if (req->r_ino2.ino)
				seq_printf(s, " #%llx/%s", req->r_ino2.ino,
					   req->r_path2);
			else
				seq_printf(s, " %s", req->r_path2);
		}

		seq_printf(s, "\n");
	}
	mutex_unlock(&mdsc->mutex);

	return 0;
}
206
/*
 * osdc debugfs file: one line per in-flight OSD request — tid, target
 * osd, placement group, object name, reassert version, and op names.
 */
static int osdc_show(struct seq_file *s, void *pp)
{
	struct ceph_client *client = s->private;
	struct ceph_osd_client *osdc = &client->osdc;
	struct rb_node *p;

	mutex_lock(&osdc->request_mutex);
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		struct ceph_osd_request *req;
		struct ceph_osd_request_head *head;
		struct ceph_osd_op *op;
		int num_ops;
		int opcode, olen;
		int i;

		req = rb_entry(p, struct ceph_osd_request, r_node);

		seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
			   req->r_osd ? req->r_osd->o_osd : -1,
			   le32_to_cpu(req->r_pgid.pool),
			   le16_to_cpu(req->r_pgid.ps));

		/* the ops array immediately follows the request head */
		head = req->r_request->front.iov_base;
		op = (void *)(head + 1);

		/* the object name is stored right after the ops array */
		num_ops = le16_to_cpu(head->num_ops);
		olen = le32_to_cpu(head->object_len);
		seq_printf(s, "%.*s", olen,
			   (const char *)(head->ops + num_ops));

		if (req->r_reassert_version.epoch)
			seq_printf(s, "\t%u'%llu",
			   (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
			   le64_to_cpu(req->r_reassert_version.version));
		else
			seq_printf(s, "\t");

		for (i = 0; i < num_ops; i++) {
			opcode = le16_to_cpu(op->op);
			seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
			op++;
		}

		seq_printf(s, "\n");
	}
	mutex_unlock(&osdc->request_mutex);
	return 0;
}
255
256static int caps_show(struct seq_file *s, void *p)
257{
258 struct ceph_client *client = p;
259 int total, avail, used, reserved, min;
260
261 ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
262 seq_printf(s, "total\t\t%d\n"
263 "avail\t\t%d\n"
264 "used\t\t%d\n"
265 "reserved\t%d\n"
266 "min\t%d\n",
267 total, avail, used, reserved, min);
268 return 0;
269}
270
/*
 * dentry_lru debugfs file: dump the mds client's dentry LRU, one
 * "<di> <dentry> name" line per entry.
 */
static int dentry_lru_show(struct seq_file *s, void *ptr)
{
	struct ceph_client *client = s->private;
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_dentry_info *di;

	spin_lock(&mdsc->dentry_lru_lock);
	list_for_each_entry(di, &mdsc->dentry_lru, lru) {
		struct dentry *dentry = di->dentry;
		seq_printf(s, "%p %p\t%.*s\n",
			   di, dentry, dentry->d_name.len, dentry->d_name.name);
	}
	spin_unlock(&mdsc->dentry_lru_lock);

	return 0;
}
287
/*
 * Generate the single_open() boilerplate for a seq_file show function:
 * an open callback that copies the debugfs inode's i_private (the
 * ceph_client, passed to debugfs_create_file below) into
 * seq_file->private, and a matching name##_fops.
 */
#define DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
	struct seq_file *sf; \
	int ret; \
	\
	ret = single_open(file, name, NULL); \
	sf = file->private_data; \
	sf->private = inode->i_private; \
	return ret; \
} \
\
static const struct file_operations name##_fops = { \
	.open		= name##_open, \
	.read		= seq_read, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
};

/* instantiate open/fops pairs for each show function above */
DEFINE_SHOW_FUNC(monmap_show)
DEFINE_SHOW_FUNC(mdsmap_show)
DEFINE_SHOW_FUNC(osdmap_show)
DEFINE_SHOW_FUNC(monc_show)
DEFINE_SHOW_FUNC(mdsc_show)
DEFINE_SHOW_FUNC(osdc_show)
DEFINE_SHOW_FUNC(dentry_lru_show)
DEFINE_SHOW_FUNC(caps_show)
315
316static int congestion_kb_set(void *data, u64 val)
317{
318 struct ceph_client *client = (struct ceph_client *)data;
319
320 if (client)
321 client->mount_args->congestion_kb = (int)val;
322
323 return 0;
324}
325
326static int congestion_kb_get(void *data, u64 *val)
327{
328 struct ceph_client *client = (struct ceph_client *)data;
329
330 if (client)
331 *val = (u64)client->mount_args->congestion_kb;
332
333 return 0;
334}
335
336
/* u64 read/write attribute backed by the getter/setter above */
DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
			congestion_kb_set, "%llu\n");
339
340int __init ceph_debugfs_init(void)
341{
342 ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
343 if (!ceph_debugfs_dir)
344 return -ENOMEM;
345 return 0;
346}
347
/* remove the top-level /sys/kernel/debug/ceph directory */
void ceph_debugfs_cleanup(void)
{
	debugfs_remove(ceph_debugfs_dir);
}
352
353int ceph_debugfs_client_init(struct ceph_client *client)
354{
355 int ret = 0;
356 char name[80];
357
358 snprintf(name, sizeof(name), FSID_FORMAT ".client%lld",
359 PR_FSID(&client->fsid), client->monc.auth->global_id);
360
361 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
362 if (!client->debugfs_dir)
363 goto out;
364
365 client->monc.debugfs_file = debugfs_create_file("monc",
366 0600,
367 client->debugfs_dir,
368 client,
369 &monc_show_fops);
370 if (!client->monc.debugfs_file)
371 goto out;
372
373 client->mdsc.debugfs_file = debugfs_create_file("mdsc",
374 0600,
375 client->debugfs_dir,
376 client,
377 &mdsc_show_fops);
378 if (!client->mdsc.debugfs_file)
379 goto out;
380
381 client->osdc.debugfs_file = debugfs_create_file("osdc",
382 0600,
383 client->debugfs_dir,
384 client,
385 &osdc_show_fops);
386 if (!client->osdc.debugfs_file)
387 goto out;
388
389 client->debugfs_monmap = debugfs_create_file("monmap",
390 0600,
391 client->debugfs_dir,
392 client,
393 &monmap_show_fops);
394 if (!client->debugfs_monmap)
395 goto out;
396
397 client->debugfs_mdsmap = debugfs_create_file("mdsmap",
398 0600,
399 client->debugfs_dir,
400 client,
401 &mdsmap_show_fops);
402 if (!client->debugfs_mdsmap)
403 goto out;
404
405 client->debugfs_osdmap = debugfs_create_file("osdmap",
406 0600,
407 client->debugfs_dir,
408 client,
409 &osdmap_show_fops);
410 if (!client->debugfs_osdmap)
411 goto out;
412
413 client->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
414 0600,
415 client->debugfs_dir,
416 client,
417 &dentry_lru_show_fops);
418 if (!client->debugfs_dentry_lru)
419 goto out;
420
421 client->debugfs_caps = debugfs_create_file("caps",
422 0400,
423 client->debugfs_dir,
424 client,
425 &caps_show_fops);
426 if (!client->debugfs_caps)
427 goto out;
428
429 client->debugfs_congestion_kb = debugfs_create_file("writeback_congestion_kb",
430 0600,
431 client->debugfs_dir,
432 client,
433 &congestion_kb_fops);
434 if (!client->debugfs_congestion_kb)
435 goto out;
436
437 sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev));
438 client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir,
439 name);
440
441 return 0;
442
443out:
444 ceph_debugfs_client_cleanup(client);
445 return ret;
446}
447
/*
 * Remove everything ceph_debugfs_client_init created.  Safe to call
 * after a partial init: debugfs_remove(NULL) is a no-op.
 */
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
	debugfs_remove(client->debugfs_bdi);
	debugfs_remove(client->debugfs_caps);
	debugfs_remove(client->debugfs_dentry_lru);
	debugfs_remove(client->debugfs_osdmap);
	debugfs_remove(client->debugfs_mdsmap);
	debugfs_remove(client->debugfs_monmap);
	debugfs_remove(client->osdc.debugfs_file);
	debugfs_remove(client->mdsc.debugfs_file);
	debugfs_remove(client->monc.debugfs_file);
	debugfs_remove(client->debugfs_congestion_kb);
	/* the directory itself goes last, after its contents */
	debugfs_remove(client->debugfs_dir);
}
462
463#else // CONFIG_DEBUG_FS
464
/* CONFIG_DEBUG_FS disabled: all debugfs hooks become no-ops */
int __init ceph_debugfs_init(void)
{
	return 0;
}

void ceph_debugfs_cleanup(void)
{
}

int ceph_debugfs_client_init(struct ceph_client *client)
{
	return 0;
}

void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
}
482
483#endif // CONFIG_DEBUG_FS
diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h
new file mode 100644
index 000000000000..65b3e022eaf5
--- /dev/null
+++ b/fs/ceph/decode.h
@@ -0,0 +1,194 @@
1#ifndef __CEPH_DECODE_H
2#define __CEPH_DECODE_H
3
4#include <asm/unaligned.h>
5#include <linux/time.h>
6
7#include "types.h"
8
9/*
10 * in all cases,
11 * void **p pointer to position pointer
12 * void *end pointer to end of buffer (last byte + 1)
13 */
14
/*
 * Little-endian decode helpers: each reads a value from *p (which
 * need not be aligned) and advances *p past it.  Callers must bounds
 * check first (see ceph_decode_need below).
 */
static inline u64 ceph_decode_64(void **p)
{
	u64 v = get_unaligned_le64(*p);
	*p += sizeof(u64);
	return v;
}
static inline u32 ceph_decode_32(void **p)
{
	u32 v = get_unaligned_le32(*p);
	*p += sizeof(u32);
	return v;
}
static inline u16 ceph_decode_16(void **p)
{
	u16 v = get_unaligned_le16(*p);
	*p += sizeof(u16);
	return v;
}
static inline u8 ceph_decode_8(void **p)
{
	u8 v = *(u8 *)*p;
	(*p)++;
	return v;
}
/* copy n raw bytes out of the buffer and advance past them */
static inline void ceph_decode_copy(void **p, void *pv, size_t n)
{
	memcpy(pv, *p, n);
	*p += n;
}
44
45/*
46 * bounds check input.
47 */
/*
 * Bounds check input: jump to the 'bad' label unless at least n bytes
 * remain between *p and end.  The check compares n against the
 * remaining distance rather than computing *(p) + (n), so a huge
 * (possibly attacker-supplied) n cannot overflow the pointer
 * arithmetic and slip past the check.  If *p has already run past
 * end, the difference is negative and the size_t cast makes the
 * comparison fail safely.
 */
#define ceph_decode_need(p, end, n, bad)			\
	do {							\
		if (unlikely((n) > (size_t)((end) - *(p))))	\
			goto bad;				\
	} while (0)
53
/*
 * Bounds-checked decode: jump to the 'bad' label if too few bytes
 * remain, otherwise decode into v (or pv) and advance *p.
 */
#define ceph_decode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u64), bad);	\
		v = ceph_decode_64(p);				\
	} while (0)
#define ceph_decode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u32), bad);	\
		v = ceph_decode_32(p);				\
	} while (0)
#define ceph_decode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u16), bad);	\
		v = ceph_decode_16(p);				\
	} while (0)
#define ceph_decode_8_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u8), bad);	\
		v = ceph_decode_8(p);				\
	} while (0)

#define ceph_decode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_decode_need(p, end, n, bad);		\
		ceph_decode_copy(p, pv, n);			\
	} while (0)
80
81/*
82 * struct ceph_timespec <-> struct timespec
83 */
/*
 * struct ceph_timespec <-> struct timespec: the wire format stores
 * both seconds and nanoseconds as 32-bit little-endian values.
 */
static inline void ceph_decode_timespec(struct timespec *ts,
					const struct ceph_timespec *tv)
{
	ts->tv_sec = le32_to_cpu(tv->tv_sec);
	ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
}
static inline void ceph_encode_timespec(struct ceph_timespec *tv,
					const struct timespec *ts)
{
	tv->tv_sec = cpu_to_le32(ts->tv_sec);
	tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
}
96
97/*
98 * sockaddr_storage <-> ceph_sockaddr
99 */
/*
 * The wire format carries the sockaddr family in network byte order;
 * these convert it in place on encode/decode.
 */
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
{
	a->in_addr.ss_family = htons(a->in_addr.ss_family);
}
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
{
	a->in_addr.ss_family = ntohs(a->in_addr.ss_family);
	/*
	 * 512 == htons(2): presumably catches an AF_INET family that
	 * was byte-swapped twice (or never swapped on the sender) —
	 * TODO confirm against the sender side.
	 */
	WARN_ON(a->in_addr.ss_family == 512);
}
109
110/*
111 * encoders
112 */
/*
 * Little-endian encode helpers: each writes a value at *p (which need
 * not be aligned) and advances *p past it.  Callers must ensure the
 * buffer has room (see ceph_encode_need below).
 */
static inline void ceph_encode_64(void **p, u64 v)
{
	put_unaligned_le64(v, (__le64 *)*p);
	*p += sizeof(u64);
}
static inline void ceph_encode_32(void **p, u32 v)
{
	put_unaligned_le32(v, (__le32 *)*p);
	*p += sizeof(u32);
}
static inline void ceph_encode_16(void **p, u16 v)
{
	put_unaligned_le16(v, (__le16 *)*p);
	*p += sizeof(u16);
}
static inline void ceph_encode_8(void **p, u8 v)
{
	*(u8 *)*p = v;
	(*p)++;
}
/* copy len raw bytes into the buffer and advance past them */
static inline void ceph_encode_copy(void **p, const void *s, int len)
{
	memcpy(*p, s, len);
	*p += len;
}
138
139/*
140 * filepath, string encoders
141 */
/*
 * Encode a filepath: version byte (1), le64 base ino, then a
 * 32-bit-length-prefixed (not NUL-terminated) path string.  A NULL
 * path encodes as length 0.  BUGs if the buffer is too small.
 */
static inline void ceph_encode_filepath(void **p, void *end,
					u64 ino, const char *path)
{
	u32 len = path ? strlen(path) : 0;
	BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end);
	ceph_encode_8(p, 1);	/* encoding version */
	ceph_encode_64(p, ino);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, path, len);
	*p += len;
}
154
/*
 * Encode a 32-bit-length-prefixed string (no NUL terminator on the
 * wire).  BUGs if the buffer is too small.
 */
static inline void ceph_encode_string(void **p, void *end,
				      const char *s, u32 len)
{
	BUG_ON(*p + sizeof(len) + len > end);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, s, len);
	*p += len;
}
164
/*
 * Bounds check output: jump to the 'bad' label unless at least n
 * bytes of buffer remain between *p and end.  As with
 * ceph_decode_need, compare n against the remaining distance rather
 * than computing *(p) + (n), so a huge n cannot overflow the pointer
 * arithmetic and defeat the check.
 */
#define ceph_encode_need(p, end, n, bad)			\
	do {							\
		if (unlikely((n) > (size_t)((end) - *(p))))	\
			goto bad;				\
	} while (0)

#define ceph_encode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u64), bad);	\
		ceph_encode_64(p, v);				\
	} while (0)
#define ceph_encode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u32), bad);	\
		ceph_encode_32(p, v);				\
	} while (0)
#define ceph_encode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u16), bad);	\
		ceph_encode_16(p, v);				\
	} while (0)

#define ceph_encode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_copy(p, pv, n);			\
	} while (0)
192
193
194#endif
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
new file mode 100644
index 000000000000..5107384ee029
--- /dev/null
+++ b/fs/ceph/dir.c
@@ -0,0 +1,1220 @@
1#include "ceph_debug.h"
2
3#include <linux/spinlock.h>
4#include <linux/fs_struct.h>
5#include <linux/namei.h>
6#include <linux/sched.h>
7
8#include "super.h"
9
10/*
11 * Directory operations: readdir, lookup, create, link, unlink,
12 * rename, etc.
13 */
14
15/*
16 * Ceph MDS operations are specified in terms of a base ino and
17 * relative path. Thus, the client can specify an operation on a
18 * specific inode (e.g., a getattr due to fstat(2)), or as a path
19 * relative to, say, the root directory.
20 *
21 * Normally, we limit ourselves to strict inode ops (no path component)
22 * or dentry operations (a single path component relative to an ino). The
23 * exception to this is open_root_dentry(), which will open the mount
24 * point by name.
25 */
26
27const struct inode_operations ceph_dir_iops;
28const struct file_operations ceph_dir_fops;
29struct dentry_operations ceph_dentry_ops;
30
31/*
32 * Initialize ceph dentry state.
33 */
34int ceph_init_dentry(struct dentry *dentry)
35{
36 struct ceph_dentry_info *di;
37
38 if (dentry->d_fsdata)
39 return 0;
40
41 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
42 dentry->d_op = &ceph_dentry_ops;
43 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
44 dentry->d_op = &ceph_snapdir_dentry_ops;
45 else
46 dentry->d_op = &ceph_snap_dentry_ops;
47
48 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
49 if (!di)
50 return -ENOMEM; /* oh well */
51
52 spin_lock(&dentry->d_lock);
53 if (dentry->d_fsdata) /* lost a race */
54 goto out_unlock;
55 di->dentry = dentry;
56 di->lease_session = NULL;
57 dentry->d_fsdata = di;
58 dentry->d_time = jiffies;
59 ceph_dentry_lru_add(dentry);
60out_unlock:
61 spin_unlock(&dentry->d_lock);
62 return 0;
63}
64
65
66
67/*
68 * for readdir, we encode the directory frag and offset within that
69 * frag into f_pos.
70 */
71static unsigned fpos_frag(loff_t p)
72{
73 return p >> 32;
74}
75static unsigned fpos_off(loff_t p)
76{
77 return p & 0xffffffff;
78}
79
80/*
81 * When possible, we try to satisfy a readdir by peeking at the
82 * dcache. We make this work by carefully ordering dentries on
83 * d_u.d_child when we initially get results back from the MDS, and
84 * falling back to a "normal" sync readdir if any dentries in the dir
85 * are dropped.
86 *
87 * I_COMPLETE tells indicates we have all dentries in the dir. It is
88 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
89 * the MDS if/when the directory is modified).
90 */
/*
 * Read directory entries straight from the dcache.
 *
 * Walks parent->d_subdirs in reverse (dentries were ordered on
 * d_u.d_child when the MDS results came in) and feeds each hashed,
 * non-snapdir dentry at or beyond filp->f_pos to filldir.
 *
 * Called with inode->i_lock held; temporarily drops both i_lock and
 * dcache_lock around each filldir callback.  Returns -EAGAIN if
 * I_COMPLETE was lost while the locks were dropped (the caller then
 * falls back to a normal MDS readdir).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || (last &&
				 filp->f_pos < ceph_dentry(last)->offset)) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		/* resume just after the last dentry we returned */
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			/* ran off the end of the child list: done */
			fi->at_end = 1;
			goto out_unlock;
		}
		/* usable entry at or past f_pos? */
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	/* pin the dentry; we drop both locks across filldir */
	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);
	spin_unlock(&inode->i_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
		last = NULL;
	}

	spin_lock(&inode->i_lock);
	spin_lock(&dcache_lock);

	if (err < 0)
		goto out_unlock;

	last = dentry;

	p = p->prev;
	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
		goto more;
	dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
	err = -EAGAIN;

out_unlock:
	spin_unlock(&dcache_lock);

	/* drop the pinned ref without holding i_lock */
	if (last) {
		spin_unlock(&inode->i_lock);
		dput(last);
		spin_lock(&inode->i_lock);
	}

	return err;
}
198
199/*
200 * make note of the last dentry we read, so we can
201 * continue at the same lexicographical point,
202 * regardless of what dir changes take place on the
203 * server.
204 */
205static int note_last_dentry(struct ceph_file_info *fi, const char *name,
206 int len)
207{
208 kfree(fi->last_name);
209 fi->last_name = kmalloc(len+1, GFP_NOFS);
210 if (!fi->last_name)
211 return -ENOMEM;
212 memcpy(fi->last_name, name, len);
213 fi->last_name[len] = 0;
214 dout("note_last_dentry '%s'\n", fi->last_name);
215 return 0;
216}
217
/*
 * Main readdir entry point.
 *
 * Emits "." and ".." first, then tries to satisfy the rest from the
 * dcache (__dcache_readdir) when the dir is known complete; otherwise
 * fetches directory frags from the MDS one chunk at a time, resuming
 * after fi->last_name.  Marks the dir I_COMPLETE when a full pass
 * finishes with no dentries released in the meantime.
 */
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = client->mount_args->max_readdir;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_opt(client, NOASYNCREADDIR) &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN) {
			spin_unlock(&inode->i_lock);
			return err;
		}
	}
	spin_unlock(&inode->i_lock);
	/* dcache readdir bailed; remember where it stopped so the MDS
	 * readdir can resume from the same name */
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir)
			ceph_mdsc_put_request(fi->last_readdir);

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_num_caps = max_entries;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			/* this frag is exhausted; no resume name needed */
			kfree(fi->last_name);
			fi->last_name = NULL;
			fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	/* feed the buffered chunk to filldir */
	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    le64_to_cpu(in->ino),
			    ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	/* a resume name means this frag has more chunks: fetch the next */
	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}
405
406static void reset_readdir(struct ceph_file_info *fi)
407{
408 if (fi->last_readdir) {
409 ceph_mdsc_put_request(fi->last_readdir);
410 fi->last_readdir = NULL;
411 }
412 kfree(fi->last_name);
413 fi->next_offset = 2; /* compensate for . and .. */
414 if (fi->dentry) {
415 dput(fi->dentry);
416 fi->dentry = NULL;
417 }
418 fi->at_end = 0;
419}
420
/*
 * llseek on a directory.
 *
 * Directory f_pos encodes (frag, offset) via fpos_frag()/fpos_off(),
 * so a seek may invalidate the buffered readdir chunk; it is dropped
 * on seekdir(0), a frag change, or a seek before the current chunk.
 *
 * NOTE(review): old_offset captures the caller-supplied offset
 * *before* the SEEK_CUR/SEEK_END adjustment, not the previous file
 * position — confirm the comparisons below really want that value.
 */
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}
463
464/*
465 * Process result of a lookup/open request.
466 *
467 * Mainly, make sure we return the final req->r_dentry (if it already
468 * existed) in place of the original VFS-provided dentry when they
469 * differ.
470 *
471 * Gracefully handle the case where the MDS replies with -ENOENT and
472 * no trace (which it may do, at its discretion, e.g., if it doesn't
473 * care to issue a lease on the negative dentry).
474 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_client *client = ceph_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
	    strcmp(dentry->d_name.name,
		   client->mount_args->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				/* stale positive dentry: drop it */
				d_drop(dentry);
				err = -ENOENT;
			} else {
				/* instantiate a negative dentry */
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;	/* caller's dentry was used as-is */
	return dentry;
}
515
/*
 * Is this the magic ".ceph" dentry in the root directory?
 * NOTE(review): strncmp() only compares the first 5 chars, so any name
 * with a ".ceph" prefix (e.g. ".cephfoo") also matches — confirm
 * whether an exact-length check is intended.
 */
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
521
522/*
523 * Look up a single dir entry. If there is a lookup intent, inform
524 * the MDS so that it gets our 'caps wanted' value in a single op.
525 */
526static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
527 struct nameidata *nd)
528{
529 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
530 struct ceph_mds_client *mdsc = &client->mdsc;
531 struct ceph_mds_request *req;
532 int op;
533 int err;
534
535 dout("lookup %p dentry %p '%.*s'\n",
536 dir, dentry, dentry->d_name.len, dentry->d_name.name);
537
538 if (dentry->d_name.len > NAME_MAX)
539 return ERR_PTR(-ENAMETOOLONG);
540
541 err = ceph_init_dentry(dentry);
542 if (err < 0)
543 return ERR_PTR(err);
544
545 /* open (but not create!) intent? */
546 if (nd &&
547 (nd->flags & LOOKUP_OPEN) &&
548 (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
549 !(nd->intent.open.flags & O_CREAT)) {
550 int mode = nd->intent.open.create_mode & ~current->fs->umask;
551 return ceph_lookup_open(dir, dentry, nd, mode, 1);
552 }
553
554 /* can we conclude ENOENT locally? */
555 if (dentry->d_inode == NULL) {
556 struct ceph_inode_info *ci = ceph_inode(dir);
557 struct ceph_dentry_info *di = ceph_dentry(dentry);
558
559 spin_lock(&dir->i_lock);
560 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
561 if (strncmp(dentry->d_name.name,
562 client->mount_args->snapdir_name,
563 dentry->d_name.len) &&
564 !is_root_ceph_dentry(dir, dentry) &&
565 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
566 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
567 di->offset = ci->i_max_offset++;
568 spin_unlock(&dir->i_lock);
569 dout(" dir %p complete, -ENOENT\n", dir);
570 d_add(dentry, NULL);
571 di->lease_shared_gen = ci->i_shared_gen;
572 return NULL;
573 }
574 spin_unlock(&dir->i_lock);
575 }
576
577 op = ceph_snap(dir) == CEPH_SNAPDIR ?
578 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
579 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
580 if (IS_ERR(req))
581 return ERR_PTR(PTR_ERR(req));
582 req->r_dentry = dget(dentry);
583 req->r_num_caps = 2;
584 /* we only need inode linkage */
585 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
586 req->r_locked_dir = dir;
587 err = ceph_mdsc_do_request(mdsc, NULL, req);
588 dentry = ceph_finish_lookup(req, dentry, err);
589 ceph_mdsc_put_request(req); /* will dput(dentry) */
590 dout("lookup result=%p\n", dentry);
591 return dentry;
592}
593
594/*
595 * If we do a create but get no trace back from the MDS, follow up with
596 * a lookup (the VFS expects us to link up the provided dentry).
597 */
598int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
599{
600 struct dentry *result = ceph_lookup(dir, dentry, NULL);
601
602 if (result && !IS_ERR(result)) {
603 /*
604 * We created the item, then did a lookup, and found
605 * it was already linked to another inode we already
606 * had in our cache (and thus got spliced). Link our
607 * dentry to that inode, but don't hash it, just in
608 * case the VFS wants to dereference it.
609 */
610 BUG_ON(!result->d_inode);
611 d_instantiate(dentry, result->d_inode);
612 return 0;
613 }
614 return PTR_ERR(result);
615}
616
617static int ceph_mknod(struct inode *dir, struct dentry *dentry,
618 int mode, dev_t rdev)
619{
620 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
621 struct ceph_mds_client *mdsc = &client->mdsc;
622 struct ceph_mds_request *req;
623 int err;
624
625 if (ceph_snap(dir) != CEPH_NOSNAP)
626 return -EROFS;
627
628 dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
629 dir, dentry, mode, rdev);
630 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
631 if (IS_ERR(req)) {
632 d_drop(dentry);
633 return PTR_ERR(req);
634 }
635 req->r_dentry = dget(dentry);
636 req->r_num_caps = 2;
637 req->r_locked_dir = dir;
638 req->r_args.mknod.mode = cpu_to_le32(mode);
639 req->r_args.mknod.rdev = cpu_to_le32(rdev);
640 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
641 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
642 err = ceph_mdsc_do_request(mdsc, dir, req);
643 if (!err && !req->r_reply_info.head->is_dentry)
644 err = ceph_handle_notrace_create(dir, dentry);
645 ceph_mdsc_put_request(req);
646 if (err)
647 d_drop(dentry);
648 return err;
649}
650
651static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
652 struct nameidata *nd)
653{
654 dout("create in dir %p dentry %p name '%.*s'\n",
655 dir, dentry, dentry->d_name.len, dentry->d_name.name);
656
657 if (ceph_snap(dir) != CEPH_NOSNAP)
658 return -EROFS;
659
660 if (nd) {
661 BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
662 dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
663 /* hrm, what should i do here if we get aliased? */
664 if (IS_ERR(dentry))
665 return PTR_ERR(dentry);
666 return 0;
667 }
668
669 /* fall back to mknod */
670 return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
671}
672
673static int ceph_symlink(struct inode *dir, struct dentry *dentry,
674 const char *dest)
675{
676 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
677 struct ceph_mds_client *mdsc = &client->mdsc;
678 struct ceph_mds_request *req;
679 int err;
680
681 if (ceph_snap(dir) != CEPH_NOSNAP)
682 return -EROFS;
683
684 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
685 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
686 if (IS_ERR(req)) {
687 d_drop(dentry);
688 return PTR_ERR(req);
689 }
690 req->r_dentry = dget(dentry);
691 req->r_num_caps = 2;
692 req->r_path2 = kstrdup(dest, GFP_NOFS);
693 req->r_locked_dir = dir;
694 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
695 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
696 err = ceph_mdsc_do_request(mdsc, dir, req);
697 if (!err && !req->r_reply_info.head->is_dentry)
698 err = ceph_handle_notrace_create(dir, dentry);
699 ceph_mdsc_put_request(req);
700 if (err)
701 d_drop(dentry);
702 return err;
703}
704
705static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
706{
707 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
708 struct ceph_mds_client *mdsc = &client->mdsc;
709 struct ceph_mds_request *req;
710 int err = -EROFS;
711 int op;
712
713 if (ceph_snap(dir) == CEPH_SNAPDIR) {
714 /* mkdir .snap/foo is a MKSNAP */
715 op = CEPH_MDS_OP_MKSNAP;
716 dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
717 dentry->d_name.len, dentry->d_name.name, dentry);
718 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
719 dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
720 op = CEPH_MDS_OP_MKDIR;
721 } else {
722 goto out;
723 }
724 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
725 if (IS_ERR(req)) {
726 err = PTR_ERR(req);
727 goto out;
728 }
729
730 req->r_dentry = dget(dentry);
731 req->r_num_caps = 2;
732 req->r_locked_dir = dir;
733 req->r_args.mkdir.mode = cpu_to_le32(mode);
734 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
735 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
736 err = ceph_mdsc_do_request(mdsc, dir, req);
737 if (!err && !req->r_reply_info.head->is_dentry)
738 err = ceph_handle_notrace_create(dir, dentry);
739 ceph_mdsc_put_request(req);
740out:
741 if (err < 0)
742 d_drop(dentry);
743 return err;
744}
745
/*
 * Create a hard link to old_dentry's inode in dir.  The MDS performs
 * the link; if the reply carries no trace, instantiate the new
 * dentry ourselves.
 */
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	/* no links inside snapshots */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		/* no trace: link up the new dentry ourselves */
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}
778
779/*
780 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
781 * looks like the link count will hit 0, drop any other caps (other
782 * than PIN) we don't specifically want (due to the file still being
783 * open).
784 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	/* always release LINK caps; nlink is about to change */
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		/* last link: also drop everything not wanted (except PIN) */
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	/* returned mask feeds the MDS request's r_inode_drop */
	return drop;
}
798
799/*
800 * rmdir and unlink are differ only by the metadata op code
801 */
802static int ceph_unlink(struct inode *dir, struct dentry *dentry)
803{
804 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
805 struct ceph_mds_client *mdsc = &client->mdsc;
806 struct inode *inode = dentry->d_inode;
807 struct ceph_mds_request *req;
808 int err = -EROFS;
809 int op;
810
811 if (ceph_snap(dir) == CEPH_SNAPDIR) {
812 /* rmdir .snap/foo is RMSNAP */
813 dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
814 dentry->d_name.name, dentry);
815 op = CEPH_MDS_OP_RMSNAP;
816 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
817 dout("unlink/rmdir dir %p dn %p inode %p\n",
818 dir, dentry, inode);
819 op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
820 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
821 } else
822 goto out;
823 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
824 if (IS_ERR(req)) {
825 err = PTR_ERR(req);
826 goto out;
827 }
828 req->r_dentry = dget(dentry);
829 req->r_num_caps = 2;
830 req->r_locked_dir = dir;
831 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
832 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
833 req->r_inode_drop = drop_caps_for_unlink(inode);
834 err = ceph_mdsc_do_request(mdsc, dir, req);
835 if (!err && !req->r_reply_info.head->is_dentry)
836 d_delete(dentry);
837 ceph_mdsc_put_request(req);
838out:
839 return err;
840}
841
/*
 * Rename old_dentry (in old_dir) to new_dentry (in new_dir) via the
 * MDS, releasing caps on both dentries (and the target inode, if any)
 * so other clients see the change promptly.
 */
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	/* can't rename across snap contexts... */
	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	/* ...and snapshots are read-only */
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		/* target exists: it is effectively unlinked by the rename */
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}
884
885
886/*
887 * Check if dentry lease is valid. If not, delete the lease. Try to
888 * renew if the least is more than half up.
889 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		/* sample the session's cap generation and ttl */
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		/* valid only if the gen matches and neither the dentry's
		 * lease nor the session has expired */
		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	/* send the renewal message outside d_lock */
	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
935
936/*
937 * Check if directory-wide content lease/cap is valid.
938 */
939static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
940{
941 struct ceph_inode_info *ci = ceph_inode(dir);
942 struct ceph_dentry_info *di = ceph_dentry(dentry);
943 int valid = 0;
944
945 spin_lock(&dir->i_lock);
946 if (ci->i_shared_gen == di->lease_shared_gen)
947 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
948 spin_unlock(&dir->i_lock);
949 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
950 dir, (unsigned)ci->i_shared_gen, dentry,
951 (unsigned)di->lease_shared_gen, valid);
952 return valid;
953}
954
955/*
956 * Check if cached dentry can be trusted.
957 */
958static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
959{
960 struct inode *dir = dentry->d_parent->d_inode;
961
962 dout("d_revalidate %p '%.*s' inode %p\n", dentry,
963 dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
964
965 /* always trust cached snapped dentries, snapdir dentry */
966 if (ceph_snap(dir) != CEPH_NOSNAP) {
967 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
968 dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
969 goto out_touch;
970 }
971 if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
972 goto out_touch;
973
974 if (dentry_lease_is_valid(dentry) ||
975 dir_lease_is_valid(dir, dentry))
976 goto out_touch;
977
978 dout("d_revalidate %p invalid\n", dentry);
979 d_drop(dentry);
980 return 0;
981out_touch:
982 ceph_dentry_lru_touch(dentry);
983 return 1;
984}
985
986/*
987 * When a dentry is released, clear the dir I_COMPLETE if it was part
988 * of the current dir gen.
989 */
static void ceph_dentry_release(struct dentry *dentry)
{
	/*
	 * NOTE(review): di is dereferenced below (lease_shared_gen)
	 * before the "if (di)" NULL check — confirm d_fsdata is always
	 * set by the time d_release runs.
	 */
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = dentry->d_parent->d_inode;

	if (parent_inode) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		/* only if this dentry belongs to the dir's current gen */
		if (ci->i_shared_gen == di->lease_shared_gen) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		/* tear down our per-dentry state */
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}
1015
/* snapdir/snapped dentries are (for now) always considered valid */
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
1025
1026
1027
1028/*
1029 * read() on a dir. This weird interface hack only works if mounted
1030 * with '-o dirstat'.
1031 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	/* only supported with the 'dirstat' mount option */
	if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	/* format the stats once, on first read */
	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries: %20lld\n"
				" files: %20lld\n"
				" subdirs: %20lld\n"
				"rentries: %20lld\n"
				" rfiles: %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes: %20lld\n"
				"rctime: %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;	/* nothing copied at all */
	/* partial copies are reported as a short read */
	*ppos += (size - left);
	return size - left;
}
1077
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, struct dentry *dentry,
			  int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;	/* newest unsafe op at the time we were called */
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* only wait for ops already pending; ignore ops queued later */
	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		/* take a ref so the request survives while we drop the lock */
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;	/* completed in time */
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		/* advance to the oldest remaining unsafe op */
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}
1128
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	/* dentries without ceph private info are not tracked */
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		/* tail of the list is the most recently used end */
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}
1149
/* Mark a dentry as recently used: move it to the LRU tail. */
void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		/* already on the list; just reposition at the MRU end */
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}
1164
/* Remove a dentry from the private LRU (e.g. on release). */
void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		/* list_del_init so a repeated del is harmless */
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}
1180
/*
 * File operations for directories.  .read implements the '-o dirstat'
 * hack (see ceph_read_dir); .fsync waits for unsafe directory ops.
 */
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};
1190
/* Inode operations for directories. */
const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,	/* one MDS op handles both unlink and rmdir */
	.rename = ceph_rename,
	.create = ceph_create,
};
1209
/* Dentry ops for regular (non-snapshot) dentries. */
struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

/* Dentry ops for the hidden .snap directory itself. */
struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
};

/* Dentries inside a snapshot: no revalidation, no private release. */
struct dentry_operations ceph_snap_dentry_ops = {
};
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
new file mode 100644
index 000000000000..fc68e39cbad6
--- /dev/null
+++ b/fs/ceph/export.c
@@ -0,0 +1,223 @@
1#include "ceph_debug.h"
2
3#include <linux/exportfs.h>
4#include <asm/unaligned.h>
5
6#include "super.h"
7
/*
 * NFS export support
 *
 * NFS re-export of a ceph mount is, at present, only semireliable.
 * The basic issue is that the Ceph architecture doesn't lend itself
 * well to generating filehandles that will remain valid forever.
 *
 * So, we do our best. If you're lucky, your inode will be in the
 * client's cache. If it's not, and you have a connectable fh, then
 * the MDS server may be able to find it for you. Otherwise, you get
 * ESTALE.
 *
 * There are ways to make this more reliable, but in the
 * non-connectable fh case, we won't ever work perfectly, and in the
 * connectable case, some changes are needed on the MDS side to work
 * better.
 */
24
/*
 * Basic fh: identifies the inode by ino alone.
 */
struct ceph_nfs_fh {
	u64 ino;	/* ceph inode number */
} __attribute__ ((packed));

/*
 * Larger 'connectable' fh that includes parent ino and name hash.
 * Use this whenever possible, as it works more reliably.
 */
struct ceph_nfs_confh {
	u64 ino, parent_ino;
	u32 parent_name_hash;	/* hash of this entry's name in the parent */
} __attribute__ ((packed));
40
41static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
42 int connectable)
43{
44 struct ceph_nfs_fh *fh = (void *)rawfh;
45 struct ceph_nfs_confh *cfh = (void *)rawfh;
46 struct dentry *parent = dentry->d_parent;
47 struct inode *inode = dentry->d_inode;
48 int type;
49
50 /* don't re-export snaps */
51 if (ceph_snap(inode) != CEPH_NOSNAP)
52 return -EINVAL;
53
54 if (*max_len >= sizeof(*cfh)) {
55 dout("encode_fh %p connectable\n", dentry);
56 cfh->ino = ceph_ino(dentry->d_inode);
57 cfh->parent_ino = ceph_ino(parent->d_inode);
58 cfh->parent_name_hash = parent->d_name.hash;
59 *max_len = sizeof(*cfh);
60 type = 2;
61 } else if (*max_len > sizeof(*fh)) {
62 if (connectable)
63 return -ENOSPC;
64 dout("encode_fh %p\n", dentry);
65 fh->ino = ceph_ino(dentry->d_inode);
66 *max_len = sizeof(*fh);
67 type = 1;
68 } else {
69 return -ENOSPC;
70 }
71 return type;
72}
73
74/*
75 * convert regular fh to dentry
76 *
77 * FIXME: we should try harder by querying the mds for the ino.
78 */
79static struct dentry *__fh_to_dentry(struct super_block *sb,
80 struct ceph_nfs_fh *fh)
81{
82 struct inode *inode;
83 struct dentry *dentry;
84 struct ceph_vino vino;
85 int err;
86
87 dout("__fh_to_dentry %llx\n", fh->ino);
88 vino.ino = fh->ino;
89 vino.snap = CEPH_NOSNAP;
90 inode = ceph_find_inode(sb, vino);
91 if (!inode)
92 return ERR_PTR(-ESTALE);
93
94 dentry = d_obtain_alias(inode);
95 if (!dentry) {
96 pr_err("fh_to_dentry %llx -- inode %p but ENOMEM\n",
97 fh->ino, inode);
98 iput(inode);
99 return ERR_PTR(-ENOMEM);
100 }
101 err = ceph_init_dentry(dentry);
102
103 if (err < 0) {
104 iput(inode);
105 return ERR_PTR(err);
106 }
107 dout("__fh_to_dentry %llx %p dentry %p\n", fh->ino, inode, dentry);
108 return dentry;
109}
110
111/*
112 * convert connectable fh to dentry
113 */
114static struct dentry *__cfh_to_dentry(struct super_block *sb,
115 struct ceph_nfs_confh *cfh)
116{
117 struct ceph_mds_client *mdsc = &ceph_client(sb)->mdsc;
118 struct inode *inode;
119 struct dentry *dentry;
120 struct ceph_vino vino;
121 int err;
122
123 dout("__cfh_to_dentry %llx (%llx/%x)\n",
124 cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
125
126 vino.ino = cfh->ino;
127 vino.snap = CEPH_NOSNAP;
128 inode = ceph_find_inode(sb, vino);
129 if (!inode) {
130 struct ceph_mds_request *req;
131
132 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH,
133 USE_ANY_MDS);
134 if (IS_ERR(req))
135 return ERR_PTR(PTR_ERR(req));
136
137 req->r_ino1 = vino;
138 req->r_ino2.ino = cfh->parent_ino;
139 req->r_ino2.snap = CEPH_NOSNAP;
140 req->r_path2 = kmalloc(16, GFP_NOFS);
141 snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
142 req->r_num_caps = 1;
143 err = ceph_mdsc_do_request(mdsc, NULL, req);
144 ceph_mdsc_put_request(req);
145 inode = ceph_find_inode(sb, vino);
146 if (!inode)
147 return ERR_PTR(err ? err : -ESTALE);
148 }
149
150 dentry = d_obtain_alias(inode);
151 if (!dentry) {
152 pr_err("cfh_to_dentry %llx -- inode %p but ENOMEM\n",
153 cfh->ino, inode);
154 iput(inode);
155 return ERR_PTR(-ENOMEM);
156 }
157 err = ceph_init_dentry(dentry);
158 if (err < 0) {
159 iput(inode);
160 return ERR_PTR(err);
161 }
162 dout("__cfh_to_dentry %llx %p dentry %p\n", cfh->ino, inode, dentry);
163 return dentry;
164}
165
/*
 * exportfs entry point: dispatch on fh type (1 = basic, 2+ =
 * connectable).
 *
 * NOTE(review): fh_len is not validated against the expected handle
 * size before fid->raw is reinterpreted — presumably the NFS layer
 * guarantees a sane length; confirm before relying on it.
 */
static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	if (fh_type == 1)
		return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
	else
		return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
}
174
175/*
176 * get parent, if possible.
177 *
178 * FIXME: we could do better by querying the mds to discover the
179 * parent.
180 */
181static struct dentry *ceph_fh_to_parent(struct super_block *sb,
182 struct fid *fid,
183 int fh_len, int fh_type)
184{
185 struct ceph_nfs_confh *cfh = (void *)fid->raw;
186 struct ceph_vino vino;
187 struct inode *inode;
188 struct dentry *dentry;
189 int err;
190
191 if (fh_type == 1)
192 return ERR_PTR(-ESTALE);
193
194 pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
195 cfh->parent_name_hash);
196
197 vino.ino = cfh->ino;
198 vino.snap = CEPH_NOSNAP;
199 inode = ceph_find_inode(sb, vino);
200 if (!inode)
201 return ERR_PTR(-ESTALE);
202
203 dentry = d_obtain_alias(inode);
204 if (!dentry) {
205 pr_err("fh_to_parent %llx -- inode %p but ENOMEM\n",
206 cfh->ino, inode);
207 iput(inode);
208 return ERR_PTR(-ENOMEM);
209 }
210 err = ceph_init_dentry(dentry);
211 if (err < 0) {
212 iput(inode);
213 return ERR_PTR(err);
214 }
215 dout("fh_to_parent %llx %p dentry %p\n", cfh->ino, inode, dentry);
216 return dentry;
217}
218
/* exportfs hooks for NFS re-export of a ceph mount */
const struct export_operations ceph_export_ops = {
	.encode_fh = ceph_encode_fh,
	.fh_to_dentry = ceph_fh_to_dentry,
	.fh_to_parent = ceph_fh_to_parent,
};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
new file mode 100644
index 000000000000..5d2af8464f6a
--- /dev/null
+++ b/fs/ceph/file.c
@@ -0,0 +1,937 @@
1#include "ceph_debug.h"
2
3#include <linux/sched.h>
4#include <linux/file.h>
5#include <linux/namei.h>
6#include <linux/writeback.h>
7
8#include "super.h"
9#include "mds_client.h"
10
11/*
12 * Ceph file operations
13 *
14 * Implement basic open/close functionality, and implement
15 * read/write.
16 *
17 * We implement three modes of file I/O:
18 * - buffered uses the generic_file_aio_{read,write} helpers
19 *
20 * - synchronous is used when there is multi-client read/write
21 * sharing, avoids the page cache, and synchronously waits for an
22 * ack from the OSD.
23 *
24 * - direct io takes the variant of the sync path that references
25 * user pages directly.
26 *
27 * fsync() flushes and waits on dirty pages, but just queues metadata
28 * for writeback: since the MDS can recover size and mtime there is no
29 * need to wait for MDS acknowledgement.
30 */
31
32
33/*
34 * Prepare an open request. Preallocate ceph_cap to avoid an
35 * inopportune ENOMEM later.
36 */
37static struct ceph_mds_request *
38prepare_open_request(struct super_block *sb, int flags, int create_mode)
39{
40 struct ceph_client *client = ceph_sb_to_client(sb);
41 struct ceph_mds_client *mdsc = &client->mdsc;
42 struct ceph_mds_request *req;
43 int want_auth = USE_ANY_MDS;
44 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
45
46 if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
47 want_auth = USE_AUTH_MDS;
48
49 req = ceph_mdsc_create_request(mdsc, op, want_auth);
50 if (IS_ERR(req))
51 goto out;
52 req->r_fmode = ceph_flags_to_mode(flags);
53 req->r_args.open.flags = cpu_to_le32(flags);
54 req->r_args.open.mode = cpu_to_le32(create_mode);
55 req->r_args.open.preferred = cpu_to_le32(-1);
56out:
57 return req;
58}
59
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		/* regular files and directories get a ceph_file_info */
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		/* presumably 2 accounts for "." and ".." in readdir
		 * offsets — TODO confirm against ceph_readdir */
		cf->next_offset = 2;
		file->private_data = cf;
		/* the fmode ref taken above is dropped in ceph_release() */
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		/* symlinks carry no per-file state */
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
106
/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS). We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	/* already set up by an open-by-intent lookup? */
	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY; /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have any caps. Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		/* snapped inode with sufficient snap caps: open locally */
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);

	/* no usable caps: do a synchronous open request to the MDS */
	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
200
201
/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called). So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	/* the vfs stores open intent flags with a +1 bias */
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */

	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_PTR(PTR_ERR(req));
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir; /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	/* may splice in a different (existing) dentry */
	dentry = ceph_finish_lookup(req, dentry, err);
	/* a create that returned no dentry trace needs fixing up */
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}
251
/*
 * Release a file: drop the fmode reference taken at open and free all
 * per-file state (readdir cursor, dirstat buffer, etc.).
 */
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);	/* the '-o dirstat' buffer, if any */
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up(&ci->i_cap_wq);
	return 0;
}
270
271/*
272 * build a vector of user pages
273 */
274static struct page **get_direct_page_vector(const char __user *data,
275 int num_pages,
276 loff_t off, size_t len)
277{
278 struct page **pages;
279 int rc;
280
281 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
282 if (!pages)
283 return ERR_PTR(-ENOMEM);
284
285 down_read(&current->mm->mmap_sem);
286 rc = get_user_pages(current, current->mm, (unsigned long)data,
287 num_pages, 0, 0, pages, NULL);
288 up_read(&current->mm->mmap_sem);
289 if (rc < 0)
290 goto fail;
291 return pages;
292
293fail:
294 kfree(pages);
295 return ERR_PTR(rc);
296}
297
/*
 * Drop the pin references on a vector of user pages and free the
 * vector itself.
 */
static void put_page_vector(struct page **pages, int num_pages)
{
	int idx;

	for (idx = num_pages - 1; idx >= 0; idx--)
		put_page(pages[idx]);
	kfree(pages);
}
306
/*
 * Free every page in a vector allocated by alloc_page_vector(), then
 * free the vector itself.
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int idx = 0;

	while (idx < num_pages) {
		__free_pages(pages[idx], 0);
		idx++;
	}
	kfree(pages);
}
315
316/*
317 * allocate a vector new pages
318 */
319static struct page **alloc_page_vector(int num_pages)
320{
321 struct page **pages;
322 int i;
323
324 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
325 if (!pages)
326 return ERR_PTR(-ENOMEM);
327 for (i = 0; i < num_pages; i++) {
328 pages[i] = alloc_page(GFP_NOFS);
329 if (pages[i] == NULL) {
330 ceph_release_page_vector(pages, i);
331 return ERR_PTR(-ENOMEM);
332 }
333 }
334 return pages;
335}
336
/*
 * copy user data into a page vector
 *
 * 'off' is the byte offset of the data within the first page; only
 * its in-page part is used.  Returns len on success, -EFAULT if a
 * copy makes no progress at all.  Partial copy_from_user progress is
 * retried from where it left off.
 */
static int copy_user_to_page_vector(struct page **pages,
				    const char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;	/* offset within current page */
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		/* no progress at all -> bad user buffer */
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
364
/*
 * copy user data from a page vector into a user pointer
 *
 * Returns len on success, -EFAULT if a copy makes no progress.
 *
 * NOTE(review): unlike the to-vector variant, 'i' advances every
 * iteration and 'po' is only updated while nonzero; this is correct
 * when each copy_to_user() completes fully, but a partial copy
 * (0 < bad < l) would advance to the next page early — confirm
 * whether that path can occur in practice.
 */
static int copy_page_vector_to_user(struct page **pages, char __user *data,
				    loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;	/* offset within first page */
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, left, PAGE_CACHE_SIZE-po);
		bad = copy_to_user(data, page_address(pages[i]) + po, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		if (po) {
			po += l - bad;
			if (po == PAGE_CACHE_SIZE)
				po = 0;
		}
		i++;
	}
	return len;
}
392
/*
 * Zero an extent within a page vector. Offset is relative to the
 * start of the first page.
 */
static void zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;	/* first page touched */

	off &= ~PAGE_CACHE_MASK;		/* offset within that page */

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);
		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	/* whole pages */
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
426
427
/*
 * Read a range of bytes striped over one or more objects. Iterate over
 * objects we stripe over. (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 *
 * On a short read that is not past EOF, *checkeof is set so the
 * caller can re-stat and retry.  Returns bytes read or negative errno.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof)
{
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;		/* bytes successfully read so far */
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads. not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;

more:
	this_len = left;
	/* readpages may shorten this_len to the current object boundary */
	ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	/* a missing object reads as zeros (a hole) */
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		/* zero any hole between previous reads and this one */
		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			zero_page_vector_range(page_off + read,
					       pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}

	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			/* short read within the file: zero the tail */
			dout("zero tail\n");
			zero_page_vector_range(page_off + read, len - read,
					       pages);
			read = len;
			goto out;
		}

		/* check i_size */
		*checkeof = 1;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
513
/*
 * Completely synchronous read and write methods. Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT) {
		/* read straight into pinned user pages */
		pages = get_direct_page_vector(data, num_pages, off, len);

		/*
		 * flush any page cache pages in this range. this
		 * will make concurrent normal and O_DIRECT io slow,
		 * but it will at least behave sensibly when they are
		 * in sequence.
		 */
	} else {
		/* bounce through kernel pages, copied to user below */
		pages = alloc_page_vector(num_pages);
	}
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* make sure dirty page cache data reaches the OSDs first */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;

done:
	if (file->f_flags & O_DIRECT)
		put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}
566
/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 *
 * Removes the request from the inode's unsafe-writes list and drops
 * the FILE_WR cap ref taken when the write was queued.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
582
583/*
584 * Synchronous write, straight from __user pointer or user pages (if
585 * O_DIRECT).
586 *
587 * If write spans object boundary, just do multiple writes. (For a
588 * correct atomic write, we should e.g. take write locks on all
589 * objects, rollback on failure, etc.)
590 */
591static ssize_t ceph_sync_write(struct file *file, const char __user *data,
592 size_t left, loff_t *offset)
593{
594 struct inode *inode = file->f_dentry->d_inode;
595 struct ceph_inode_info *ci = ceph_inode(inode);
596 struct ceph_client *client = ceph_inode_to_client(inode);
597 struct ceph_osd_request *req;
598 struct page **pages;
599 int num_pages;
600 long long unsigned pos;
601 u64 len;
602 int written = 0;
603 int flags;
604 int do_sync = 0;
605 int check_caps = 0;
606 int ret;
607 struct timespec mtime = CURRENT_TIME;
608
609 if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
610 return -EROFS;
611
612 dout("sync_write on file %p %lld~%u %s\n", file, *offset,
613 (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
614
615 if (file->f_flags & O_APPEND)
616 pos = i_size_read(inode);
617 else
618 pos = *offset;
619
620 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
621 if (ret < 0)
622 return ret;
623
624 ret = invalidate_inode_pages2_range(inode->i_mapping,
625 pos >> PAGE_CACHE_SHIFT,
626 (pos + left) >> PAGE_CACHE_SHIFT);
627 if (ret < 0)
628 dout("invalidate_inode_pages2_range returned %d\n", ret);
629
630 flags = CEPH_OSD_FLAG_ORDERSNAP |
631 CEPH_OSD_FLAG_ONDISK |
632 CEPH_OSD_FLAG_WRITE;
633 if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
634 flags |= CEPH_OSD_FLAG_ACK;
635 else
636 do_sync = 1;
637
638 /*
639 * we may need to do multiple writes here if we span an object
640 * boundary. this isn't atomic, unfortunately. :(
641 */
642more:
643 len = left;
644 req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
645 ceph_vino(inode), pos, &len,
646 CEPH_OSD_OP_WRITE, flags,
647 ci->i_snap_realm->cached_context,
648 do_sync,
649 ci->i_truncate_seq, ci->i_truncate_size,
650 &mtime, false, 2);
651 if (IS_ERR(req))
652 return PTR_ERR(req);
653
654 num_pages = calc_pages_for(pos, len);
655
656 if (file->f_flags & O_DIRECT) {
657 pages = get_direct_page_vector(data, num_pages, pos, len);
658 if (IS_ERR(pages)) {
659 ret = PTR_ERR(pages);
660 goto out;
661 }
662
663 /*
664 * throw out any page cache pages in this range. this
665 * may block.
666 */
667 truncate_inode_pages_range(inode->i_mapping, pos, pos+len);
668 } else {
669 pages = alloc_page_vector(num_pages);
670 if (IS_ERR(pages)) {
671 ret = PTR_ERR(pages);
672 goto out;
673 }
674 ret = copy_user_to_page_vector(pages, data, pos, len);
675 if (ret < 0) {
676 ceph_release_page_vector(pages, num_pages);
677 goto out;
678 }
679
680 if ((file->f_flags & O_SYNC) == 0) {
681 /* get a second commit callback */
682 req->r_safe_callback = sync_write_commit;
683 req->r_own_pages = 1;
684 }
685 }
686 req->r_pages = pages;
687 req->r_num_pages = num_pages;
688 req->r_inode = inode;
689
690 ret = ceph_osdc_start_request(&client->osdc, req, false);
691 if (!ret) {
692 if (req->r_safe_callback) {
693 /*
694 * Add to inode unsafe list only after we
695 * start_request so that a tid has been assigned.
696 */
697 spin_lock(&ci->i_unsafe_lock);
698 list_add(&ci->i_unsafe_writes, &req->r_unsafe_item);
699 spin_unlock(&ci->i_unsafe_lock);
700 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
701 }
702 ret = ceph_osdc_wait_request(&client->osdc, req);
703 }
704
705 if (file->f_flags & O_DIRECT)
706 put_page_vector(pages, num_pages);
707 else if (file->f_flags & O_SYNC)
708 ceph_release_page_vector(pages, num_pages);
709
710out:
711 ceph_osdc_put_request(req);
712 if (ret == 0) {
713 pos += len;
714 written += len;
715 left -= len;
716 if (left)
717 goto more;
718
719 ret = written;
720 *offset = pos;
721 if (pos > i_size_read(inode))
722 check_caps = ceph_inode_set_size(inode, pos);
723 if (check_caps)
724 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
725 NULL);
726 }
727 return ret;
728}
729
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void *base = iov->iov_base;
	ssize_t ret;
	int got = 0;
	int checkeof = 0, read = 0;	/* read: bytes from earlier passes */

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	/* need FILE_RD; FILE_CACHE additionally permits cached reads */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);

out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	/* a short sync read may just mean our size was stale */
	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, reading more\n");
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}
793
794/*
795 * Take cap references to avoid releasing caps to MDS mid-write.
796 *
797 * If we are synchronous, and write with an old snap context, the OSD
798 * may return EOLDSNAPC. In that case, retry the write.. _after_
799 * dropping our cap refs and allowing the pending snap to logically
800 * complete _before_ this write occurs.
801 *
802 * If we are near ENOSPC, write synchronously.
803 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
		       unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
	loff_t endoff = pos + iov->iov_len;	/* end of the range we will dirty */
	int got = 0;				/* cap bits held for the write */
	int ret, err;

	/* snapshots are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	/* refuse outright if the cluster reports itself full */
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	/*
	 * Take FILE_WR (and opportunistically FILE_BUFFER) references so
	 * the MDS cannot revoke them mid-write; endoff lets the cap code
	 * see how far the file may grow.
	 */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		/* no buffering cap, O_DIRECT, or sync mount: write
		 * synchronously to the OSDs */
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
			&iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

		/* O_SYNC, sync inode, or near-full cluster: flush the
		 * just-written range out immediately */
		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, file->f_path.dentry,
					      pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		/* note that we dirtied data, so caps get flushed to the MDS */
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}

out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	/*
	 * EOLDSNAPC: we wrote with a stale snap context.  Retry only after
	 * the cap refs are dropped, so the pending snap can logically
	 * complete before our write lands (see function comment above).
	 */
	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}
871
872/*
873 * llseek. be sure to verify file size on SEEK_END.
874 */
875static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
876{
877 struct inode *inode = file->f_mapping->host;
878 int ret;
879
880 mutex_lock(&inode->i_mutex);
881 __ceph_do_pending_vmtruncate(inode);
882 switch (origin) {
883 case SEEK_END:
884 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
885 if (ret < 0) {
886 offset = ret;
887 goto out;
888 }
889 offset += inode->i_size;
890 break;
891 case SEEK_CUR:
892 /*
893 * Here we special-case the lseek(fd, 0, SEEK_CUR)
894 * position-querying operation. Avoid rewriting the "same"
895 * f_pos value back to the file because a concurrent read(),
896 * write() or lseek() might have altered it
897 */
898 if (offset == 0) {
899 offset = file->f_pos;
900 goto out;
901 }
902 offset += file->f_pos;
903 break;
904 }
905
906 if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
907 offset = -EINVAL;
908 goto out;
909 }
910
911 /* Special lock needed here? */
912 if (offset != file->f_pos) {
913 file->f_pos = offset;
914 file->f_version = 0;
915 }
916
917out:
918 mutex_unlock(&inode->i_mutex);
919 return offset;
920}
921
/*
 * File operations for regular ceph files.  The synchronous read/write
 * entry points are the generic do_sync_read/do_sync_write wrappers
 * around our aio handlers above; a single ioctl handler serves both
 * native and compat callers.
 */
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
};
937
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
new file mode 100644
index 000000000000..7abe1aed819b
--- /dev/null
+++ b/fs/ceph/inode.c
@@ -0,0 +1,1750 @@
1#include "ceph_debug.h"
2
3#include <linux/module.h>
4#include <linux/fs.h>
5#include <linux/smp_lock.h>
6#include <linux/slab.h>
7#include <linux/string.h>
8#include <linux/uaccess.h>
9#include <linux/kernel.h>
10#include <linux/namei.h>
11#include <linux/writeback.h>
12#include <linux/vmalloc.h>
13#include <linux/pagevec.h>
14
15#include "super.h"
16#include "decode.h"
17
18/*
19 * Ceph inode operations
20 *
21 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
22 * setattr, etc.), xattr helpers, and helpers for assimilating
23 * metadata returned by the MDS into our cache.
24 *
25 * Also define helpers for doing asynchronous writeback, invalidation,
26 * and truncation for the benefit of those who can't afford to block
27 * (typically because they are in the message handler path).
28 */
29
30static const struct inode_operations ceph_symlink_iops;
31
32static void ceph_invalidate_work(struct work_struct *work);
33static void ceph_writeback_work(struct work_struct *work);
34static void ceph_vmtruncate_work(struct work_struct *work);
35
36/*
37 * find or create an inode, given the ceph ino number
38 */
39struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
40{
41 struct inode *inode;
42 ino_t t = ceph_vino_to_ino(vino);
43
44 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
45 if (inode == NULL)
46 return ERR_PTR(-ENOMEM);
47 if (inode->i_state & I_NEW) {
48 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
49 inode, ceph_vinop(inode), (u64)inode->i_ino);
50 unlock_new_inode(inode);
51 }
52
53 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
54 vino.snap, inode);
55 return inode;
56}
57
58/*
 * get/construct snapdir inode for a given directory
60 */
61struct inode *ceph_get_snapdir(struct inode *parent)
62{
63 struct ceph_vino vino = {
64 .ino = ceph_ino(parent),
65 .snap = CEPH_SNAPDIR,
66 };
67 struct inode *inode = ceph_get_inode(parent->i_sb, vino);
68 struct ceph_inode_info *ci = ceph_inode(inode);
69
70 BUG_ON(!S_ISDIR(parent->i_mode));
71 if (IS_ERR(inode))
72 return ERR_PTR(PTR_ERR(inode));
73 inode->i_mode = parent->i_mode;
74 inode->i_uid = parent->i_uid;
75 inode->i_gid = parent->i_gid;
76 inode->i_op = &ceph_dir_iops;
77 inode->i_fop = &ceph_dir_fops;
78 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
79 ci->i_rbytes = 0;
80 return inode;
81}
82
/*
 * Inode operations for regular ceph files; fill_inode() also points
 * special inodes (fifo/blk/chr/sock) here.
 */
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
92
93
94/*
95 * We use a 'frag tree' to keep track of the MDS's directory fragments
96 * for a given inode (usually there is just a single fragment). We
97 * need to know when a child frag is delegated to a new MDS, or when
98 * it is flagged as replicated, so we can direct our requests
99 * accordingly.
100 */
101
102/*
103 * find/create a frag in the tree
104 */
105static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
106 u32 f)
107{
108 struct rb_node **p;
109 struct rb_node *parent = NULL;
110 struct ceph_inode_frag *frag;
111 int c;
112
113 p = &ci->i_fragtree.rb_node;
114 while (*p) {
115 parent = *p;
116 frag = rb_entry(parent, struct ceph_inode_frag, node);
117 c = ceph_frag_compare(f, frag->frag);
118 if (c < 0)
119 p = &(*p)->rb_left;
120 else if (c > 0)
121 p = &(*p)->rb_right;
122 else
123 return frag;
124 }
125
126 frag = kmalloc(sizeof(*frag), GFP_NOFS);
127 if (!frag) {
128 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
129 "frag %x\n", &ci->vfs_inode,
130 ceph_vinop(&ci->vfs_inode), f);
131 return ERR_PTR(-ENOMEM);
132 }
133 frag->frag = f;
134 frag->split_by = 0;
135 frag->mds = -1;
136 frag->ndist = 0;
137
138 rb_link_node(&frag->node, parent, p);
139 rb_insert_color(&frag->node, &ci->i_fragtree);
140
141 dout("get_or_create_frag added %llx.%llx frag %x\n",
142 ceph_vinop(&ci->vfs_inode), f);
143 return frag;
144}
145
146/*
147 * find a specific frag @f
148 */
149struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
150{
151 struct rb_node *n = ci->i_fragtree.rb_node;
152
153 while (n) {
154 struct ceph_inode_frag *frag =
155 rb_entry(n, struct ceph_inode_frag, node);
156 int c = ceph_frag_compare(f, frag->frag);
157 if (c < 0)
158 n = n->rb_left;
159 else if (c > 0)
160 n = n->rb_right;
161 else
162 return frag;
163 }
164 return NULL;
165}
166
167/*
168 * Choose frag containing the given value @v. If @pfrag is
169 * specified, copy the frag delegation info to the caller if
170 * it is present.
171 */
172u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
173 struct ceph_inode_frag *pfrag,
174 int *found)
175{
176 u32 t = ceph_frag_make(0, 0);
177 struct ceph_inode_frag *frag;
178 unsigned nway, i;
179 u32 n;
180
181 if (found)
182 *found = 0;
183
184 mutex_lock(&ci->i_fragtree_mutex);
185 while (1) {
186 WARN_ON(!ceph_frag_contains_value(t, v));
187 frag = __ceph_find_frag(ci, t);
188 if (!frag)
189 break; /* t is a leaf */
190 if (frag->split_by == 0) {
191 if (pfrag)
192 memcpy(pfrag, frag, sizeof(*pfrag));
193 if (found)
194 *found = 1;
195 break;
196 }
197
198 /* choose child */
199 nway = 1 << frag->split_by;
200 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
201 frag->split_by, nway);
202 for (i = 0; i < nway; i++) {
203 n = ceph_frag_make_child(t, frag->split_by, i);
204 if (ceph_frag_contains_value(n, v)) {
205 t = n;
206 break;
207 }
208 }
209 BUG_ON(i == nway);
210 }
211 dout("choose_frag(%x) = %x\n", v, t);
212
213 mutex_unlock(&ci->i_fragtree_mutex);
214 return t;
215}
216
217/*
218 * Process dirfrag (delegation) info from the mds. Include leaf
219 * fragment in tree ONLY if ndist > 0. Otherwise, only
220 * branches/splits are included in i_fragtree)
221 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);	/* which fragment */
	int mds = le32_to_cpu(dirinfo->auth);	/* authoritative mds */
	int ndist = le32_to_cpu(dirinfo->ndist);/* number of replicas */
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	/* record who serves this frag, capping the replica list we keep */
	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
278
279
280/*
281 * initialize a newly allocated inode.
282 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	/* slab memory is not zeroed; every field below must be set here */
	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	/* versioning and flags */
	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	/* directory fragment tree */
	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	/* xattr cache */
	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	/* capability state */
	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	/* per-open-mode reference counts */
	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	/* truncation tracking */
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	/* size negotiation with the MDS */
	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	/* cap reference counters */
	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	/* unsafe (unacked) operation tracking */
	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	/* snapshot realm membership */
	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	/* async work items (writeback, invalidation, truncation) */
	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
370
371void ceph_destroy_inode(struct inode *inode)
372{
373 struct ceph_inode_info *ci = ceph_inode(inode);
374 struct ceph_inode_frag *frag;
375 struct rb_node *n;
376
377 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
378
379 ceph_queue_caps_release(inode);
380
381 kfree(ci->i_symlink);
382 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
383 frag = rb_entry(n, struct ceph_inode_frag, node);
384 rb_erase(n, &ci->i_fragtree);
385 kfree(frag);
386 }
387
388 __ceph_destroy_xattrs(ci);
389 if (ci->i_xattrs.blob)
390 ceph_buffer_put(ci->i_xattrs.blob);
391 if (ci->i_xattrs.prealloc_blob)
392 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
393
394 kmem_cache_free(ceph_inode_cachep, ci);
395}
396
397
398/*
399 * Helpers to fill in size, ctime, mtime, and atime. We have to be
400 * careful because either the client or MDS may have more up to date
401 * info, depending on which capabilities are held, and whether
402 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
403 * and size are monotonically increasing, except when utimes() or
404 * truncate() increments the corresponding _seq values.)
405 */
/*
 * Apply an MDS-reported size/truncate_seq/truncate_size to the inode.
 * Returns nonzero if the caller should queue an async vmtruncate.
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	/*
	 * Accept the new size only if it is at least as fresh as ours:
	 * a newer truncate epoch, or the same epoch with a larger size
	 * (size only grows within one truncate epoch).
	 */
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		/* i_blocks is in 512-byte units, rounded up */
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	/* remember the newest truncate_size for this (or a newer) epoch */
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
446
/*
 * Merge MDS-reported ctime/mtime/atime into the inode, honoring the
 * time_warp_seq so that a local utimes() is not clobbered by stale
 * MDS values (and vice versa).
 */
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER)) {
		/* we hold write caps: only move times forward */
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
510
511/*
512 * Populate an inode based on info from mds. May be called on new or
513 * existing inodes.
514 */
515static int fill_inode(struct inode *inode,
516 struct ceph_mds_reply_info_in *iinfo,
517 struct ceph_mds_reply_dirfrag *dirinfo,
518 struct ceph_mds_session *session,
519 unsigned long ttl_from, int cap_fmode,
520 struct ceph_cap_reservation *caps_reservation)
521{
522 struct ceph_mds_reply_inode *info = iinfo->in;
523 struct ceph_inode_info *ci = ceph_inode(inode);
524 int i;
525 int issued, implemented;
526 struct timespec mtime, atime, ctime;
527 u32 nsplits;
528 struct ceph_buffer *xattr_blob = NULL;
529 int err = 0;
530 int queue_trunc = 0;
531
532 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
533 inode, ceph_vinop(inode), le64_to_cpu(info->version),
534 ci->i_version);
535
536 /*
537 * prealloc xattr data, if it looks like we'll need it. only
538 * if len > 4 (meaning there are actually xattrs; the first 4
539 * bytes are the xattr count).
540 */
541 if (iinfo->xattr_len > 4) {
542 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
543 if (!xattr_blob)
544 pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
545 iinfo->xattr_len);
546 }
547
548 spin_lock(&inode->i_lock);
549
550 /*
551 * provided version will be odd if inode value is projected,
552 * even if stable. skip the update if we have a newer info
553 * (e.g., due to inode info racing form multiple MDSs), or if
554 * we are getting projected (unstable) inode info.
555 */
556 if (le64_to_cpu(info->version) > 0 &&
557 (ci->i_version & ~1) > le64_to_cpu(info->version))
558 goto no_change;
559
560 issued = __ceph_caps_issued(ci, &implemented);
561 issued |= implemented | __ceph_caps_dirty(ci);
562
563 /* update inode */
564 ci->i_version = le64_to_cpu(info->version);
565 inode->i_version++;
566 inode->i_rdev = le32_to_cpu(info->rdev);
567
568 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
569 inode->i_mode = le32_to_cpu(info->mode);
570 inode->i_uid = le32_to_cpu(info->uid);
571 inode->i_gid = le32_to_cpu(info->gid);
572 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
573 inode->i_uid, inode->i_gid);
574 }
575
576 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
577 inode->i_nlink = le32_to_cpu(info->nlink);
578
579 /* be careful with mtime, atime, size */
580 ceph_decode_timespec(&atime, &info->atime);
581 ceph_decode_timespec(&mtime, &info->mtime);
582 ceph_decode_timespec(&ctime, &info->ctime);
583 queue_trunc = ceph_fill_file_size(inode, issued,
584 le32_to_cpu(info->truncate_seq),
585 le64_to_cpu(info->truncate_size),
586 le64_to_cpu(info->size));
587 ceph_fill_file_time(inode, issued,
588 le32_to_cpu(info->time_warp_seq),
589 &ctime, &mtime, &atime);
590
591 ci->i_max_size = le64_to_cpu(info->max_size);
592 ci->i_layout = info->layout;
593 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
594
595 /* xattrs */
596 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
597 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
598 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
599 if (ci->i_xattrs.blob)
600 ceph_buffer_put(ci->i_xattrs.blob);
601 ci->i_xattrs.blob = xattr_blob;
602 if (xattr_blob)
603 memcpy(ci->i_xattrs.blob->vec.iov_base,
604 iinfo->xattr_data, iinfo->xattr_len);
605 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
606 }
607
608 inode->i_mapping->a_ops = &ceph_aops;
609 inode->i_mapping->backing_dev_info =
610 &ceph_client(inode->i_sb)->backing_dev_info;
611
612 switch (inode->i_mode & S_IFMT) {
613 case S_IFIFO:
614 case S_IFBLK:
615 case S_IFCHR:
616 case S_IFSOCK:
617 init_special_inode(inode, inode->i_mode, inode->i_rdev);
618 inode->i_op = &ceph_file_iops;
619 break;
620 case S_IFREG:
621 inode->i_op = &ceph_file_iops;
622 inode->i_fop = &ceph_file_fops;
623 break;
624 case S_IFLNK:
625 inode->i_op = &ceph_symlink_iops;
626 if (!ci->i_symlink) {
627 int symlen = iinfo->symlink_len;
628 char *sym;
629
630 BUG_ON(symlen != inode->i_size);
631 spin_unlock(&inode->i_lock);
632
633 err = -ENOMEM;
634 sym = kmalloc(symlen+1, GFP_NOFS);
635 if (!sym)
636 goto out;
637 memcpy(sym, iinfo->symlink, symlen);
638 sym[symlen] = 0;
639
640 spin_lock(&inode->i_lock);
641 if (!ci->i_symlink)
642 ci->i_symlink = sym;
643 else
644 kfree(sym); /* lost a race */
645 }
646 break;
647 case S_IFDIR:
648 inode->i_op = &ceph_dir_iops;
649 inode->i_fop = &ceph_dir_fops;
650
651 ci->i_files = le64_to_cpu(info->files);
652 ci->i_subdirs = le64_to_cpu(info->subdirs);
653 ci->i_rbytes = le64_to_cpu(info->rbytes);
654 ci->i_rfiles = le64_to_cpu(info->rfiles);
655 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
656 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
657
658 /* set dir completion flag? */
659 if (ci->i_files == 0 && ci->i_subdirs == 0 &&
660 ceph_snap(inode) == CEPH_NOSNAP &&
661 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
662 dout(" marking %p complete (empty)\n", inode);
663 ci->i_ceph_flags |= CEPH_I_COMPLETE;
664 ci->i_max_offset = 2;
665 }
666
667 /* it may be better to set st_size in getattr instead? */
668 if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
669 inode->i_size = ci->i_rbytes;
670 break;
671 default:
672 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
673 ceph_vinop(inode), inode->i_mode);
674 }
675
676no_change:
677 spin_unlock(&inode->i_lock);
678
679 /* queue truncate if we saw i_size decrease */
680 if (queue_trunc)
681 ceph_queue_vmtruncate(inode);
682
683 /* populate frag tree */
684 /* FIXME: move me up, if/when version reflects fragtree changes */
685 nsplits = le32_to_cpu(info->fragtree.nsplits);
686 mutex_lock(&ci->i_fragtree_mutex);
687 for (i = 0; i < nsplits; i++) {
688 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
689 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
690
691 if (IS_ERR(frag))
692 continue;
693 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
694 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
695 }
696 mutex_unlock(&ci->i_fragtree_mutex);
697
698 /* were we issued a capability? */
699 if (info->cap.caps) {
700 if (ceph_snap(inode) == CEPH_NOSNAP) {
701 ceph_add_cap(inode, session,
702 le64_to_cpu(info->cap.cap_id),
703 cap_fmode,
704 le32_to_cpu(info->cap.caps),
705 le32_to_cpu(info->cap.wanted),
706 le32_to_cpu(info->cap.seq),
707 le32_to_cpu(info->cap.mseq),
708 le64_to_cpu(info->cap.realm),
709 info->cap.flags,
710 caps_reservation);
711 } else {
712 spin_lock(&inode->i_lock);
713 dout(" %p got snap_caps %s\n", inode,
714 ceph_cap_string(le32_to_cpu(info->cap.caps)));
715 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
716 if (cap_fmode >= 0)
717 __ceph_get_fmode(ci, cap_fmode);
718 spin_unlock(&inode->i_lock);
719 }
720 }
721
722 /* update delegation info? */
723 if (dirinfo)
724 ceph_fill_dirfrag(inode, dirinfo);
725
726 err = 0;
727
728out:
729 if (xattr_blob)
730 ceph_buffer_put(xattr_blob);
731 return err;
732}
733
734/*
735 * caller should hold session s_mutex.
736 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	/* MDS reports duration in ms; compute absolute expiry (ttl) and
	 * the half-life point at which we should renew */
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	/* zero mask: no lease was actually granted on this dentry */
	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	/* don't adopt a lease belonging to a different session */
	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;	/* no renewal in flight */
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
783
784/*
785 * splice a dentry to an inode.
786 * caller must hold directory i_mutex for this to be safe.
787 *
788 * we will only rehash the resulting dentry if @prehash is
789 * true; @prehash will be set to false (for the benefit of
790 * the caller) if we fail.
791 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash)
{
	struct dentry *realdn;

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	/* bind dn to in; may return a different (pre-existing) dentry */
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
		       dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		/* an existing alias won; drop our ref on dn and use it */
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, atomic_read(&dn->d_count),
		     realdn, atomic_read(&realdn->d_count),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		/* dn itself was attached to in */
		BUG_ON(!ceph_dentry(dn));

		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	/* rehash unless the caller asked us not to (or we failed) */
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
out:
	return dn;
}
827
828/*
829 * Set dentry's directory position based on the current dir's max, and
830 * order it in d_subdirs, so that dcache_readdir behaves.
831 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	/* claim the next slot in the parent's readdir offset space */
	spin_lock(&inode->i_lock);
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dcache_lock);
	spin_lock(&dn->d_lock);
	/*
	 * NOTE(review): this moves the parent's d_subdirs list *head* to
	 * just before dn (making dn the first entry seen when iterating
	 * from the head), rather than moving dn to the tail of d_subdirs.
	 * If the latter was intended, the arguments look reversed --
	 * confirm against dcache_readdir's iteration order.
	 */
	list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dcache_lock);
}
854
855/*
856 * Incorporate results into the local cache. This is either just
857 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
858 * after a lookup).
859 *
860 * A reply may contain
861 * a directory inode along with a dentry.
862 * and/or a target inode
863 *
864 * Called with snap_rwsem (read).
865 */
866int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
867 struct ceph_mds_session *session)
868{
869 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
870 struct inode *in = NULL;
871 struct ceph_mds_reply_inode *ininfo;
872 struct ceph_vino vino;
873 int i = 0;
874 int err = 0;
875
876 dout("fill_trace %p is_dentry %d is_target %d\n", req,
877 rinfo->head->is_dentry, rinfo->head->is_target);
878
879#if 0
880 /*
881 * Debugging hook:
882 *
883 * If we resend completed ops to a recovering mds, we get no
884 * trace. Since that is very rare, pretend this is the case
885 * to ensure the 'no trace' handlers in the callers behave.
886 *
887 * Fill in inodes unconditionally to avoid breaking cap
888 * invariants.
889 */
890 if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
891 pr_info("fill_trace faking empty trace on %lld %s\n",
892 req->r_tid, ceph_mds_op_name(rinfo->head->op));
893 if (rinfo->head->is_dentry) {
894 rinfo->head->is_dentry = 0;
895 err = fill_inode(req->r_locked_dir,
896 &rinfo->diri, rinfo->dirfrag,
897 session, req->r_request_started, -1);
898 }
899 if (rinfo->head->is_target) {
900 rinfo->head->is_target = 0;
901 ininfo = rinfo->targeti.in;
902 vino.ino = le64_to_cpu(ininfo->ino);
903 vino.snap = le64_to_cpu(ininfo->snapid);
904 in = ceph_get_inode(sb, vino);
905 err = fill_inode(in, &rinfo->targeti, NULL,
906 session, req->r_request_started,
907 req->r_fmode);
908 iput(in);
909 }
910 }
911#endif
912
913 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
914 dout("fill_trace reply is empty!\n");
915 if (rinfo->head->result == 0 && req->r_locked_dir) {
916 struct ceph_inode_info *ci =
917 ceph_inode(req->r_locked_dir);
918 dout(" clearing %p complete (empty trace)\n",
919 req->r_locked_dir);
920 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
921 ci->i_release_count++;
922 }
923 return 0;
924 }
925
926 if (rinfo->head->is_dentry) {
927 struct inode *dir = req->r_locked_dir;
928
929 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
930 session, req->r_request_started, -1,
931 &req->r_caps_reservation);
932 if (err < 0)
933 return err;
934 }
935
936 if (rinfo->head->is_dentry && !req->r_aborted) {
937 /*
938 * lookup link rename : null -> possibly existing inode
939 * mknod symlink mkdir : null -> new inode
940 * unlink : linked -> null
941 */
942 struct inode *dir = req->r_locked_dir;
943 struct dentry *dn = req->r_dentry;
944 bool have_dir_cap, have_lease;
945
946 BUG_ON(!dn);
947 BUG_ON(!dir);
948 BUG_ON(dn->d_parent->d_inode != dir);
949 BUG_ON(ceph_ino(dir) !=
950 le64_to_cpu(rinfo->diri.in->ino));
951 BUG_ON(ceph_snap(dir) !=
952 le64_to_cpu(rinfo->diri.in->snapid));
953
954 /* do we have a lease on the whole dir? */
955 have_dir_cap =
956 (le32_to_cpu(rinfo->diri.in->cap.caps) &
957 CEPH_CAP_FILE_SHARED);
958
959 /* do we have a dn lease? */
960 have_lease = have_dir_cap ||
961 (le16_to_cpu(rinfo->dlease->mask) &
962 CEPH_LOCK_DN);
963
964 if (!have_lease)
965 dout("fill_trace no dentry lease or dir cap\n");
966
967 /* rename? */
968 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
969 dout(" src %p '%.*s' dst %p '%.*s'\n",
970 req->r_old_dentry,
971 req->r_old_dentry->d_name.len,
972 req->r_old_dentry->d_name.name,
973 dn, dn->d_name.len, dn->d_name.name);
974 dout("fill_trace doing d_move %p -> %p\n",
975 req->r_old_dentry, dn);
976 d_move(req->r_old_dentry, dn);
977 dout(" src %p '%.*s' dst %p '%.*s'\n",
978 req->r_old_dentry,
979 req->r_old_dentry->d_name.len,
980 req->r_old_dentry->d_name.name,
981 dn, dn->d_name.len, dn->d_name.name);
982 /* ensure target dentry is invalidated, despite
983 rehashing bug in vfs_rename_dir */
984 dn->d_time = jiffies;
985 ceph_dentry(dn)->lease_shared_gen = 0;
986 /* take overwritten dentry's readdir offset */
987 ceph_dentry(req->r_old_dentry)->offset =
988 ceph_dentry(dn)->offset;
989 dn = req->r_old_dentry; /* use old_dentry */
990 in = dn->d_inode;
991 }
992
993 /* null dentry? */
994 if (!rinfo->head->is_target) {
995 dout("fill_trace null dentry\n");
996 if (dn->d_inode) {
997 dout("d_delete %p\n", dn);
998 d_delete(dn);
999 } else {
1000 dout("d_instantiate %p NULL\n", dn);
1001 d_instantiate(dn, NULL);
1002 if (have_lease && d_unhashed(dn))
1003 d_rehash(dn);
1004 update_dentry_lease(dn, rinfo->dlease,
1005 session,
1006 req->r_request_started);
1007 }
1008 goto done;
1009 }
1010
1011 /* attach proper inode */
1012 ininfo = rinfo->targeti.in;
1013 vino.ino = le64_to_cpu(ininfo->ino);
1014 vino.snap = le64_to_cpu(ininfo->snapid);
1015 if (!dn->d_inode) {
1016 in = ceph_get_inode(sb, vino);
1017 if (IS_ERR(in)) {
1018 pr_err("fill_trace bad get_inode "
1019 "%llx.%llx\n", vino.ino, vino.snap);
1020 err = PTR_ERR(in);
1021 d_delete(dn);
1022 goto done;
1023 }
1024 dn = splice_dentry(dn, in, &have_lease);
1025 if (IS_ERR(dn)) {
1026 err = PTR_ERR(dn);
1027 goto done;
1028 }
1029 req->r_dentry = dn; /* may have spliced */
1030 ceph_set_dentry_offset(dn);
1031 igrab(in);
1032 } else if (ceph_ino(in) == vino.ino &&
1033 ceph_snap(in) == vino.snap) {
1034 igrab(in);
1035 } else {
1036 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1037 dn, in, ceph_ino(in), ceph_snap(in),
1038 vino.ino, vino.snap);
1039 have_lease = false;
1040 in = NULL;
1041 }
1042
1043 if (have_lease)
1044 update_dentry_lease(dn, rinfo->dlease, session,
1045 req->r_request_started);
1046 dout(" final dn %p\n", dn);
1047 i++;
1048 } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1049 req->r_op == CEPH_MDS_OP_MKSNAP) {
1050 struct dentry *dn = req->r_dentry;
1051
1052 /* fill out a snapdir LOOKUPSNAP dentry */
1053 BUG_ON(!dn);
1054 BUG_ON(!req->r_locked_dir);
1055 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1056 ininfo = rinfo->targeti.in;
1057 vino.ino = le64_to_cpu(ininfo->ino);
1058 vino.snap = le64_to_cpu(ininfo->snapid);
1059 in = ceph_get_inode(sb, vino);
1060 if (IS_ERR(in)) {
1061 pr_err("fill_inode get_inode badness %llx.%llx\n",
1062 vino.ino, vino.snap);
1063 err = PTR_ERR(in);
1064 d_delete(dn);
1065 goto done;
1066 }
1067 dout(" linking snapped dir %p to dn %p\n", in, dn);
1068 dn = splice_dentry(dn, in, NULL);
1069 if (IS_ERR(dn)) {
1070 err = PTR_ERR(dn);
1071 goto done;
1072 }
1073 ceph_set_dentry_offset(dn);
1074 req->r_dentry = dn; /* may have spliced */
1075 igrab(in);
1076 rinfo->head->is_dentry = 1; /* fool notrace handlers */
1077 }
1078
1079 if (rinfo->head->is_target) {
1080 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1081 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1082
1083 if (in == NULL || ceph_ino(in) != vino.ino ||
1084 ceph_snap(in) != vino.snap) {
1085 in = ceph_get_inode(sb, vino);
1086 if (IS_ERR(in)) {
1087 err = PTR_ERR(in);
1088 goto done;
1089 }
1090 }
1091 req->r_target_inode = in;
1092
1093 err = fill_inode(in,
1094 &rinfo->targeti, NULL,
1095 session, req->r_request_started,
1096 (le32_to_cpu(rinfo->head->result) == 0) ?
1097 req->r_fmode : -1,
1098 &req->r_caps_reservation);
1099 if (err < 0) {
1100 pr_err("fill_inode badness %p %llx.%llx\n",
1101 in, ceph_vinop(in));
1102 goto done;
1103 }
1104 }
1105
1106done:
1107 dout("fill_trace done err=%d\n", err);
1108 return err;
1109}
1110
1111/*
1112 * Prepopulate our cache with readdir results, leases, etc.
1113 */
1114int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1115 struct ceph_mds_session *session)
1116{
1117 struct dentry *parent = req->r_dentry;
1118 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1119 struct qstr dname;
1120 struct dentry *dn;
1121 struct inode *in;
1122 int err = 0, i;
1123 struct inode *snapdir = NULL;
1124 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1125 u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1126 struct ceph_dentry_info *di;
1127
1128 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1129 snapdir = ceph_get_snapdir(parent->d_inode);
1130 parent = d_find_alias(snapdir);
1131 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1132 rinfo->dir_nr, parent);
1133 } else {
1134 dout("readdir_prepopulate %d items under dn %p\n",
1135 rinfo->dir_nr, parent);
1136 if (rinfo->dir_dir)
1137 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1138 }
1139
1140 for (i = 0; i < rinfo->dir_nr; i++) {
1141 struct ceph_vino vino;
1142
1143 dname.name = rinfo->dir_dname[i];
1144 dname.len = rinfo->dir_dname_len[i];
1145 dname.hash = full_name_hash(dname.name, dname.len);
1146
1147 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1148 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1149
1150retry_lookup:
1151 dn = d_lookup(parent, &dname);
1152 dout("d_lookup on parent=%p name=%.*s got %p\n",
1153 parent, dname.len, dname.name, dn);
1154
1155 if (!dn) {
1156 dn = d_alloc(parent, &dname);
1157 dout("d_alloc %p '%.*s' = %p\n", parent,
1158 dname.len, dname.name, dn);
1159 if (dn == NULL) {
1160 dout("d_alloc badness\n");
1161 err = -ENOMEM;
1162 goto out;
1163 }
1164 err = ceph_init_dentry(dn);
1165 if (err < 0)
1166 goto out;
1167 } else if (dn->d_inode &&
1168 (ceph_ino(dn->d_inode) != vino.ino ||
1169 ceph_snap(dn->d_inode) != vino.snap)) {
1170 dout(" dn %p points to wrong inode %p\n",
1171 dn, dn->d_inode);
1172 d_delete(dn);
1173 dput(dn);
1174 goto retry_lookup;
1175 } else {
1176 /* reorder parent's d_subdirs */
1177 spin_lock(&dcache_lock);
1178 spin_lock(&dn->d_lock);
1179 list_move(&dn->d_u.d_child, &parent->d_subdirs);
1180 spin_unlock(&dn->d_lock);
1181 spin_unlock(&dcache_lock);
1182 }
1183
1184 di = dn->d_fsdata;
1185 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1186
1187 /* inode */
1188 if (dn->d_inode) {
1189 in = dn->d_inode;
1190 } else {
1191 in = ceph_get_inode(parent->d_sb, vino);
1192 if (in == NULL) {
1193 dout("new_inode badness\n");
1194 d_delete(dn);
1195 dput(dn);
1196 err = -ENOMEM;
1197 goto out;
1198 }
1199 dn = splice_dentry(dn, in, NULL);
1200 }
1201
1202 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1203 req->r_request_started, -1,
1204 &req->r_caps_reservation) < 0) {
1205 pr_err("fill_inode badness on %p\n", in);
1206 dput(dn);
1207 continue;
1208 }
1209 update_dentry_lease(dn, rinfo->dir_dlease[i],
1210 req->r_session, req->r_request_started);
1211 dput(dn);
1212 }
1213 req->r_did_prepopulate = true;
1214
1215out:
1216 if (snapdir) {
1217 iput(snapdir);
1218 dput(parent);
1219 }
1220 dout("readdir_prepopulate done\n");
1221 return err;
1222}
1223
1224int ceph_inode_set_size(struct inode *inode, loff_t size)
1225{
1226 struct ceph_inode_info *ci = ceph_inode(inode);
1227 int ret = 0;
1228
1229 spin_lock(&inode->i_lock);
1230 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1231 inode->i_size = size;
1232 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1233
1234 /* tell the MDS if we are approaching max_size */
1235 if ((size << 1) >= ci->i_max_size &&
1236 (ci->i_reported_size << 1) < ci->i_max_size)
1237 ret = 1;
1238
1239 spin_unlock(&inode->i_lock);
1240 return ret;
1241}
1242
1243/*
1244 * Write back inode data in a worker thread. (This can't be done
1245 * in the message handler context.)
1246 */
1247void ceph_queue_writeback(struct inode *inode)
1248{
1249 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1250 &ceph_inode(inode)->i_wb_work)) {
1251 dout("ceph_queue_writeback %p\n", inode);
1252 igrab(inode);
1253 } else {
1254 dout("ceph_queue_writeback %p failed\n", inode);
1255 }
1256}
1257
1258static void ceph_writeback_work(struct work_struct *work)
1259{
1260 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1261 i_wb_work);
1262 struct inode *inode = &ci->vfs_inode;
1263
1264 dout("writeback %p\n", inode);
1265 filemap_fdatawrite(&inode->i_data);
1266 iput(inode);
1267}
1268
1269/*
1270 * queue an async invalidation
1271 */
1272void ceph_queue_invalidate(struct inode *inode)
1273{
1274 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1275 &ceph_inode(inode)->i_pg_inv_work)) {
1276 dout("ceph_queue_invalidate %p\n", inode);
1277 igrab(inode);
1278 } else {
1279 dout("ceph_queue_invalidate %p failed\n", inode);
1280 }
1281}
1282
1283/*
1284 * invalidate any pages that are not dirty or under writeback. this
1285 * includes pages that are clean and mapped.
1286 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;	/* next index to look up from */
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			/* leave dirty/writeback pages alone ... */
			int skip_page =
			    (PageDirty(page) || PageWriteback(page));

			/* ... and pages we cannot lock without blocking */
			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			/* page is clean and locked: drop it */
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
1325
1326/*
1327 * Invalidate inode pages in a worker thread. (This can't be done
1328 * in the message handler context.)
1329 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;	/* call ceph_check_caps() before returning? */

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_gen == 0 ||
	    ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* the revocation this work item was queued for is no
		 * longer current (or nothing is cached) */
		BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
		/* nevermind! */
		ci->i_rdcache_revoking = 0;
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	/* i_lock is dropped across the page walk; the gen is rechecked
	 * below to detect a racing read that repopulated the cache */
	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		check = 1;
	} else {
		/* raced: a new generation started; leave state for the
		 * next invalidation pass */
		dout("invalidate_pages %p gen %d raced, gen now %d\n",
		     inode, orig_gen, ci->i_rdcache_gen);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);	/* ref taken by ceph_queue_invalidate() */
}
1372
1373
1374/*
1375 * called by trunc_wq; take i_mutex ourselves
1376 *
1377 * We also truncate in a separate thread as well.
1378 */
1379static void ceph_vmtruncate_work(struct work_struct *work)
1380{
1381 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1382 i_vmtruncate_work);
1383 struct inode *inode = &ci->vfs_inode;
1384
1385 dout("vmtruncate_work %p\n", inode);
1386 mutex_lock(&inode->i_mutex);
1387 __ceph_do_pending_vmtruncate(inode);
1388 mutex_unlock(&inode->i_mutex);
1389 iput(inode);
1390}
1391
1392/*
1393 * Queue an async vmtruncate. If we fail to queue work, we will handle
1394 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1395 */
1396void ceph_queue_vmtruncate(struct inode *inode)
1397{
1398 struct ceph_inode_info *ci = ceph_inode(inode);
1399
1400 if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
1401 &ci->i_vmtruncate_work)) {
1402 dout("ceph_queue_vmtruncate %p\n", inode);
1403 igrab(inode);
1404 } else {
1405 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1406 inode, ci->i_truncate_pending);
1407 }
1408}
1409
1410/*
1411 * called with i_mutex held.
1412 *
1413 * Make sure any pending truncation is applied before doing anything
1414 * that may depend on it.
1415 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;			/* truncate target size */
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		/* state may have changed while i_lock was dropped */
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	/* drop i_lock: truncate_inode_pages may block */
	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	/* with no dirty buffers outstanding, let the MDS know caps may
	 * be releasable now */
	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up(&ci->i_cap_wq);
}
1462
1463
1464/*
1465 * symlinks
1466 */
1467static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1468{
1469 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1470 nd_set_link(nd, ci->i_symlink);
1471 return NULL;
1472}
1473
/* inode operations for symlinks */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};
1478
/*
 * setattr
 *
 * For each requested attribute: if we hold the relevant EXCL cap,
 * apply the change locally and mark the cap dirty; otherwise record
 * the change in a SETATTR request for the auth MDS (releasing the
 * SHARED caps it invalidates).  A single request carries all fields
 * that could not be handled locally.
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode = dentry->d_parent->d_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;	/* caps to release / mark dirty */
	int mask = 0;			/* CEPH_SETATTR_* bits for the mds */
	int err = 0;

	/* snapshots are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	/* allocate the request up front so we never have to unwind the
	 * cap bookkeeping below on allocation failure */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			/* bump time_warp_seq: the timestamp may move
			 * backwards, and readers must notice */
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			/* with only WR, we may move time forward locally */
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		/* only a size _increase_ can be done locally under EXCL;
		 * shrinking must go through the mds */
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * a almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	/* only release caps we actually hold */
	release &= issued;
	spin_unlock(&inode->i_lock);

	if (mask) {
		/* something could not be done locally; send it to the mds */
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}
1677
1678/*
1679 * Verify that we have a lease on the given mask. If not,
1680 * do a getattr against an mds.
1681 */
1682int ceph_do_getattr(struct inode *inode, int mask)
1683{
1684 struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
1685 struct ceph_mds_client *mdsc = &client->mdsc;
1686 struct ceph_mds_request *req;
1687 int err;
1688
1689 if (ceph_snap(inode) == CEPH_SNAPDIR) {
1690 dout("do_getattr inode %p SNAPDIR\n", inode);
1691 return 0;
1692 }
1693
1694 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
1695 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1696 return 0;
1697
1698 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1699 if (IS_ERR(req))
1700 return PTR_ERR(req);
1701 req->r_inode = igrab(inode);
1702 req->r_num_caps = 1;
1703 req->r_args.getattr.mask = cpu_to_le32(mask);
1704 err = ceph_mdsc_do_request(mdsc, NULL, req);
1705 ceph_mdsc_put_request(req);
1706 dout("do_getattr result=%d\n", err);
1707 return err;
1708}
1709
1710
1711/*
1712 * Check inode permissions. We verify we have a valid value for
1713 * the AUTH cap, then call the generic handler.
1714 */
1715int ceph_permission(struct inode *inode, int mask)
1716{
1717 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1718
1719 if (!err)
1720 err = generic_permission(inode, mask, NULL);
1721 return err;
1722}
1723
1724/*
1725 * Get all attributes. Hopefully somedata we'll have a statlite()
1726 * and can limit the fields we require to be accurate.
1727 */
1728int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1729 struct kstat *stat)
1730{
1731 struct inode *inode = dentry->d_inode;
1732 struct ceph_inode_info *ci = ceph_inode(inode);
1733 int err;
1734
1735 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1736 if (!err) {
1737 generic_fillattr(inode, stat);
1738 stat->ino = inode->i_ino;
1739 if (ceph_snap(inode) != CEPH_NOSNAP)
1740 stat->dev = ceph_snap(inode);
1741 else
1742 stat->dev = 0;
1743 if (S_ISDIR(inode->i_mode)) {
1744 stat->size = ci->i_rbytes;
1745 stat->blocks = 0;
1746 stat->blksize = 65536;
1747 }
1748 }
1749 return err;
1750}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
new file mode 100644
index 000000000000..8a5bcae62846
--- /dev/null
+++ b/fs/ceph/ioctl.c
@@ -0,0 +1,160 @@
1#include <linux/in.h>
2
3#include "ioctl.h"
4#include "super.h"
5#include "ceph_debug.h"
6
7
8/*
9 * ioctls
10 */
11
12/*
13 * get and set the file layout
14 */
15static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
16{
17 struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
18 struct ceph_ioctl_layout l;
19 int err;
20
21 err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
22 if (!err) {
23 l.stripe_unit = ceph_file_layout_su(ci->i_layout);
24 l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
25 l.object_size = ceph_file_layout_object_size(ci->i_layout);
26 l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
27 l.preferred_osd =
28 (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
29 if (copy_to_user(arg, &l, sizeof(l)))
30 return -EFAULT;
31 }
32
33 return err;
34}
35
36static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
37{
38 struct inode *inode = file->f_dentry->d_inode;
39 struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
40 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
41 struct ceph_mds_request *req;
42 struct ceph_ioctl_layout l;
43 int err, i;
44
45 /* copy and validate */
46 if (copy_from_user(&l, arg, sizeof(l)))
47 return -EFAULT;
48
49 if ((l.object_size & ~PAGE_MASK) ||
50 (l.stripe_unit & ~PAGE_MASK) ||
51 !l.stripe_unit ||
52 (l.object_size &&
53 (unsigned)l.object_size % (unsigned)l.stripe_unit))
54 return -EINVAL;
55
56 /* make sure it's a valid data pool */
57 if (l.data_pool > 0) {
58 mutex_lock(&mdsc->mutex);
59 err = -EINVAL;
60 for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
61 if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
62 err = 0;
63 break;
64 }
65 mutex_unlock(&mdsc->mutex);
66 if (err)
67 return err;
68 }
69
70 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
71 USE_AUTH_MDS);
72 if (IS_ERR(req))
73 return PTR_ERR(req);
74 req->r_inode = igrab(inode);
75 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
76
77 req->r_args.setlayout.layout.fl_stripe_unit =
78 cpu_to_le32(l.stripe_unit);
79 req->r_args.setlayout.layout.fl_stripe_count =
80 cpu_to_le32(l.stripe_count);
81 req->r_args.setlayout.layout.fl_object_size =
82 cpu_to_le32(l.object_size);
83 req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
84 req->r_args.setlayout.layout.fl_pg_preferred =
85 cpu_to_le32(l.preferred_osd);
86
87 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
88 ceph_mdsc_put_request(req);
89 return err;
90}
91
92/*
93 * Return object name, size/offset information, and location (OSD
94 * number, network address) for a given file offset.
95 */
static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
{
	struct ceph_ioctl_dataloc dl;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
	u64 len = 1, olen;
	u64 tmp;
	struct ceph_object_layout ol;
	struct ceph_pg pgid;

	/* copy and validate */
	if (copy_from_user(&dl, arg, sizeof(dl)))
		return -EFAULT;

	/* hold the osdmap steady while we compute the mapping */
	down_read(&osdc->map_sem);
	/* map the file offset to an object number/offset per the layout */
	ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
				      &dl.object_no, &dl.object_offset, &olen);
	/* report the file offset of the start of this object */
	dl.file_offset -= dl.object_offset;
	dl.object_size = ceph_file_layout_object_size(ci->i_layout);
	dl.block_size = ceph_file_layout_su(ci->i_layout);

	/* block_offset = object_offset % block_size */
	tmp = dl.object_offset;
	dl.block_offset = do_div(tmp, dl.block_size);

	snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
		 ceph_ino(inode), dl.object_no);
	/* object name -> placement group -> primary osd */
	ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout,
				osdc->osdmap);

	pgid = ol.ol_pgid;
	dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
	if (dl.osd >= 0) {
		struct ceph_entity_addr *a =
			ceph_osd_addr(osdc->osdmap, dl.osd);
		if (a)
			memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr));
	} else {
		/* no primary osd mapped for this pg */
		memset(&dl.osd_addr, 0, sizeof(dl.osd_addr));
	}
	up_read(&osdc->map_sem);

	/* send result back to user */
	if (copy_to_user(arg, &dl, sizeof(dl)))
		return -EFAULT;

	return 0;
}
145
146long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
147{
148 dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
149 switch (cmd) {
150 case CEPH_IOC_GET_LAYOUT:
151 return ceph_ioctl_get_layout(file, (void __user *)arg);
152
153 case CEPH_IOC_SET_LAYOUT:
154 return ceph_ioctl_set_layout(file, (void __user *)arg);
155
156 case CEPH_IOC_GET_DATALOC:
157 return ceph_ioctl_get_dataloc(file, (void __user *)arg);
158 }
159 return -ENOTTY;
160}
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
new file mode 100644
index 000000000000..25e4f1a9d059
--- /dev/null
+++ b/fs/ceph/ioctl.h
@@ -0,0 +1,40 @@
#ifndef FS_CEPH_IOCTL_H
#define FS_CEPH_IOCTL_H

#include <linux/ioctl.h>
#include <linux/types.h>

#define CEPH_IOCTL_MAGIC 0x97

/*
 * File striping layout, as exchanged with
 * CEPH_IOC_GET_LAYOUT/CEPH_IOC_SET_LAYOUT.
 */
/* just use u64 to align sanely on all archs */
struct ceph_ioctl_layout {
	__u64 stripe_unit, stripe_count, object_size;
	__u64 data_pool;
	__s64 preferred_osd;
};

#define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1,		\
				 struct ceph_ioctl_layout)
#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2,		\
				 struct ceph_ioctl_layout)

/*
 * Extract identity, address of the OSD and object storing a given
 * file offset.
 */
struct ceph_ioctl_dataloc {
	__u64 file_offset;           /* in+out: file offset */
	__u64 object_offset;         /* out: offset in object */
	__u64 object_no;             /* out: object # */
	__u64 object_size;           /* out: object size */
	char object_name[64];        /* out: object name */
	__u64 block_offset;          /* out: offset in block */
	__u64 block_size;            /* out: block length */
	__s64 osd;                   /* out: osd # */
	struct sockaddr_storage osd_addr; /* out: osd address */
};

#define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3,	\
				   struct ceph_ioctl_dataloc)

#endif
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
new file mode 100644
index 000000000000..a2600101ec22
--- /dev/null
+++ b/fs/ceph/mds_client.c
@@ -0,0 +1,3021 @@
1#include "ceph_debug.h"
2
3#include <linux/wait.h>
4#include <linux/sched.h>
5
6#include "mds_client.h"
7#include "mon_client.h"
8#include "super.h"
9#include "messenger.h"
10#include "decode.h"
11#include "auth.h"
12#include "pagelist.h"
13
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
38
39static void __wake_requests(struct ceph_mds_client *mdsc,
40 struct list_head *head);
41
42const static struct ceph_connection_operations mds_con_ops;
43
44
45/*
46 * mds reply parsing
47 */
48
/*
 * parse individual inode info.
 *
 * Decodes one inode record from an MDS reply into @info, advancing *p.
 * Nothing is copied: @info keeps pointers into the message buffer for
 * the inode struct, symlink target, and xattr blob.  Returns 0, or
 * -EIO on a short or corrupt encoding.
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info)
{
	int err = -EIO;

	info->in = *p;
	/* fixed header plus the variable-length fragtree split array */
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	/* the _safe/_need macros jump to 'bad' if the buffer would overrun */
	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
	return 0;
bad:
	return err;
}
75
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.  On success @info holds pointers into the message
 * buffer; returns -EIO if the trace section is malformed.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info)
{
	int err;

	if (info->head->is_dentry) {
		/* parent directory inode */
		err = parse_reply_info_in(p, end, &info->diri);
		if (err < 0)
			goto out_bad;

		/* dirfrag header plus its replication (ndist) array */
		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		/* dentry name, then its lease */
		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti);
		if (err < 0)
			goto out_bad;
	}

	/* the trace section must be consumed exactly */
	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
122
/*
 * parse readdir results.
 *
 * Fills info->dir_* with pointers into the message buffer; the four
 * per-entry arrays share one kcalloc'ed allocation (freed via
 * destroy_reply_info).  Returns 0, -ENOMEM, or -EIO on bad encoding.
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info)
{
	u32 num, i = 0;
	int err;

	/* dirfrag header plus its replication (ndist) array */
	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	/* entry count (u32) + dir_end/dir_complete flag bytes */
	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	/* alloc large array: one allocation backs dir_in, dir_dname,
	 * dir_dname_len and dir_dlease, partitioned below */
	info->dir_nr = num;
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {
		err = -ENOMEM;
		goto out_bad;
	}
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

	while (num) {
		/* dentry: name length, name bytes, lease */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i]);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	/* the dir section must be consumed exactly */
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
193
/*
 * parse entire mds reply: trace, readdir contents, and snap blob,
 * each section prefixed by a 32-bit length.
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_trace(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* dir content */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_dir(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob: kept as an opaque pointer/length into the message */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	/* no trailing garbage allowed */
	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
240
/* free the single allocation backing the parsed readdir arrays */
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	kfree(info->dir_in);
}
245
246
247/*
248 * sessions
249 */
250static const char *session_state_name(int s)
251{
252 switch (s) {
253 case CEPH_MDS_SESSION_NEW: return "new";
254 case CEPH_MDS_SESSION_OPENING: return "opening";
255 case CEPH_MDS_SESSION_OPEN: return "open";
256 case CEPH_MDS_SESSION_HUNG: return "hung";
257 case CEPH_MDS_SESSION_CLOSING: return "closing";
258 case CEPH_MDS_SESSION_RESTARTING: return "restarting";
259 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
260 default: return "???";
261 }
262}
263
264static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
265{
266 if (atomic_inc_not_zero(&s->s_ref)) {
267 dout("mdsc get_session %p %d -> %d\n", s,
268 atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
269 return s;
270 } else {
271 dout("mdsc get_session %p 0 -- FAIL", s);
272 return NULL;
273 }
274}
275
/*
 * Drop a reference on @s; on the final put, destroy its authorizer
 * (if any) and free the session.
 */
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_authorizer)
			s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
				s->s_mdsc->client->monc.auth, s->s_authorizer);
		kfree(s);
	}
}
287
/*
 * Look up the registered session for @mds and take a reference on it.
 * Returns NULL if no session exists for that rank.
 *
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}
304
305static bool __have_session(struct ceph_mds_client *mdsc, int mds)
306{
307 if (mds >= mdsc->max_sessions)
308 return false;
309 return mdsc->sessions[mds];
310}
311
312static int __verify_registered_session(struct ceph_mds_client *mdsc,
313 struct ceph_mds_session *s)
314{
315 if (s->s_mds >= mdsc->max_sessions ||
316 mdsc->sessions[s->s_mds] != s)
317 return -ENOENT;
318 return 0;
319}
320
321/*
322 * create+register a new session for given mds.
323 * called under mdsc->mutex.
324 */
325static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
326 int mds)
327{
328 struct ceph_mds_session *s;
329
330 s = kzalloc(sizeof(*s), GFP_NOFS);
331 s->s_mdsc = mdsc;
332 s->s_mds = mds;
333 s->s_state = CEPH_MDS_SESSION_NEW;
334 s->s_ttl = 0;
335 s->s_seq = 0;
336 mutex_init(&s->s_mutex);
337
338 ceph_con_init(mdsc->client->msgr, &s->s_con);
339 s->s_con.private = s;
340 s->s_con.ops = &mds_con_ops;
341 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
342 s->s_con.peer_name.num = cpu_to_le64(mds);
343
344 spin_lock_init(&s->s_cap_lock);
345 s->s_cap_gen = 0;
346 s->s_cap_ttl = 0;
347 s->s_renew_requested = 0;
348 s->s_renew_seq = 0;
349 INIT_LIST_HEAD(&s->s_caps);
350 s->s_nr_caps = 0;
351 s->s_trim_caps = 0;
352 atomic_set(&s->s_ref, 1);
353 INIT_LIST_HEAD(&s->s_waiting);
354 INIT_LIST_HEAD(&s->s_unsafe);
355 s->s_num_cap_releases = 0;
356 s->s_cap_iterator = NULL;
357 INIT_LIST_HEAD(&s->s_cap_releases);
358 INIT_LIST_HEAD(&s->s_cap_releases_done);
359 INIT_LIST_HEAD(&s->s_cap_flushing);
360 INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
361
362 dout("register_session mds%d\n", mds);
363 if (mds >= mdsc->max_sessions) {
364 int newmax = 1 << get_count_order(mds+1);
365 struct ceph_mds_session **sa;
366
367 dout("register_session realloc to %d\n", newmax);
368 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
369 if (sa == NULL)
370 goto fail_realloc;
371 if (mdsc->sessions) {
372 memcpy(sa, mdsc->sessions,
373 mdsc->max_sessions * sizeof(void *));
374 kfree(mdsc->sessions);
375 }
376 mdsc->sessions = sa;
377 mdsc->max_sessions = newmax;
378 }
379 mdsc->sessions[mds] = s;
380 atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
381
382 ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
383
384 return s;
385
386fail_realloc:
387 kfree(s);
388 return ERR_PTR(-ENOMEM);
389}
390
/*
 * Drop the session from the sessions[] table, close its connection,
 * and drop the reference the table held.
 *
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);	/* drop the sessions[] ref */
}
403
404/*
405 * drop session refs in request.
406 *
407 * should be last request ref, or hold mdsc->mutex
408 */
409static void put_request_session(struct ceph_mds_request *req)
410{
411 if (req->r_session) {
412 ceph_put_mds_session(req->r_session);
413 req->r_session = NULL;
414 }
415}
416
/*
 * Final teardown of an mds request, invoked when its kref reaches
 * zero: release request/reply messages, drop the cap/inode/dentry
 * references held while it was in flight, and free the request.
 */
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_put(req->r_reply);
		destroy_reply_info(&req->r_reply_info);
	}
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode),
				  CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
				  CEPH_CAP_PIN);
	if (req->r_target_inode)
		iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry) {
		/* PIN ref is on the old dentry's parent directory */
		ceph_put_cap_refs(
			ceph_inode(req->r_old_dentry->d_parent->d_inode),
			CEPH_CAP_PIN);
		dput(req->r_old_dentry);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_request_session(req);
	ceph_unreserve_caps(&req->r_caps_reservation);
	kfree(req);
}
452
453/*
454 * lookup session, bump ref if found.
455 *
456 * called under mdsc->mutex.
457 */
458static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
459 u64 tid)
460{
461 struct ceph_mds_request *req;
462 struct rb_node *n = mdsc->request_tree.rb_node;
463
464 while (n) {
465 req = rb_entry(n, struct ceph_mds_request, r_node);
466 if (tid < req->r_tid)
467 n = n->rb_left;
468 else if (tid > req->r_tid)
469 n = n->rb_right;
470 else {
471 ceph_mdsc_get_request(req);
472 return req;
473 }
474 }
475 return NULL;
476}
477
478static void __insert_request(struct ceph_mds_client *mdsc,
479 struct ceph_mds_request *new)
480{
481 struct rb_node **p = &mdsc->request_tree.rb_node;
482 struct rb_node *parent = NULL;
483 struct ceph_mds_request *req = NULL;
484
485 while (*p) {
486 parent = *p;
487 req = rb_entry(parent, struct ceph_mds_request, r_node);
488 if (new->r_tid < req->r_tid)
489 p = &(*p)->rb_left;
490 else if (new->r_tid > req->r_tid)
491 p = &(*p)->rb_right;
492 else
493 BUG();
494 }
495
496 rb_link_node(&new->r_node, parent, p);
497 rb_insert_color(&new->r_node, &mdsc->request_tree);
498}
499
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);	/* ref held by the request tree */
	__insert_request(mdsc, req);

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		/* track this op on the directory's unsafe-ops list */
		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}
526
/*
 * Remove the request from the tid tree (dropping the tree's ref) and
 * from its directory's unsafe-ops list.  Called under mdsc->mutex.
 */
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &mdsc->request_tree);
	ceph_mdsc_put_request(req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
}
542
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	/* pick the inode the request operates on (or its parent dir) */
	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		if (req->r_dentry->d_inode) {
			inode = req->r_dentry->d_inode;
		} else {
			/* negative dentry: direct by parent + name hash */
			inode = req->r_dentry->d_parent->d_inode;
			hash = req->r_dentry->d_name.hash;
			is_hash = true;
		}
	}
	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		/* map the name hash to a dirfrag and its mds set */
		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, frag.mds,
				     (int)r, frag.ndist);
				return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				return mds;
			}
		}
	}

	/* fall back to an mds we hold a cap from on this inode */
	spin_lock(&inode->i_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&inode->i_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&inode->i_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
653
654
655/*
656 * session messages
657 */
658static struct ceph_msg *create_session_msg(u32 op, u64 seq)
659{
660 struct ceph_msg *msg;
661 struct ceph_mds_session_head *h;
662
663 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
664 if (IS_ERR(msg)) {
665 pr_err("create_session_msg ENOMEM creating msg\n");
666 return ERR_PTR(PTR_ERR(msg));
667 }
668 h = msg->front.iov_base;
669 h->op = cpu_to_le32(op);
670 h->seq = cpu_to_le64(seq);
671 return msg;
672}
673
674/*
675 * send session open request.
676 *
677 * called under mdsc->mutex
678 */
679static int __open_session(struct ceph_mds_client *mdsc,
680 struct ceph_mds_session *session)
681{
682 struct ceph_msg *msg;
683 int mstate;
684 int mds = session->s_mds;
685 int err = 0;
686
687 /* wait for mds to go active? */
688 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
689 dout("open_session to mds%d (%s)\n", mds,
690 ceph_mds_state_name(mstate));
691 session->s_state = CEPH_MDS_SESSION_OPENING;
692 session->s_renew_requested = jiffies;
693
694 /* send connect message */
695 msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
696 if (IS_ERR(msg)) {
697 err = PTR_ERR(msg);
698 goto out;
699 }
700 ceph_con_send(&session->s_con, msg);
701
702out:
703 return 0;
704}
705
706/*
707 * session caps
708 */
709
/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	/* drain the partially-filled release messages... */
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	/* ...and the full ones queued for sending */
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	spin_unlock(&session->s_cap_lock);
}
732
/*
 * Helper to safely iterate over all caps associated with a session.
 *
 * s_cap_lock is dropped while the callback runs; the cap being visited
 * is published via s_cap_iterator, and the final iput()/ceph_put_cap()
 * for the previous entry is deferred until the lock is not held.
 *
 * caller must hold session s_mutex
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				 int (*cb)(struct inode *, struct ceph_cap *,
					   void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			/* inode is going away; skip this cap */
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		/* drop refs deferred from the previous iteration */
		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			/* cap was detached from its inode while we were
			 * out of the lock; finish its removal here */
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			cap->session = NULL;
			old_cap = cap; /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	if (last_inode)
		iput(last_inode);
	if (old_cap)
		ceph_put_cap(old_cap);

	return ret;
}
799
800static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
801 void *arg)
802{
803 struct ceph_inode_info *ci = ceph_inode(inode);
804 dout("removing cap %p, ci is %p, inode is %p\n",
805 cap, ci, &ci->vfs_inode);
806 ceph_remove_cap(cap);
807 return 0;
808}
809
/*
 * Remove every cap attached to this session, then free the session's
 * preallocated cap-release messages.
 *
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	cleanup_cap_releases(session);
}
820
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up(&ci->i_cap_wq);
	if (arg) {
		/* non-NULL arg == reconnect: reset max_size bookkeeping */
		spin_lock(&inode->i_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
841
/* wake cap waiters on every inode with a cap in this session */
static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
849
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	/* note when the ttl has already passed and no newer renew is
	 * outstanding */
	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
		ceph_mds_state_name(state));
	session->s_renew_requested = jiffies;
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}
885
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	/* stale iff the previous ttl had expired (or was never set) */
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;	/* wake waiters after unlocking */
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
920
921/*
922 * send a session close request
923 */
924static int request_close_session(struct ceph_mds_client *mdsc,
925 struct ceph_mds_session *session)
926{
927 struct ceph_msg *msg;
928 int err = 0;
929
930 dout("request_close_session mds%d state %s seq %lld\n",
931 session->s_mds, session_state_name(session->s_state),
932 session->s_seq);
933 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
934 if (IS_ERR(msg))
935 err = PTR_ERR(msg);
936 else
937 ceph_con_send(&session->s_con, msg);
938 return err;
939}
940
941/*
942 * Called with s_mutex held.
943 */
944static int __close_session(struct ceph_mds_client *mdsc,
945 struct ceph_mds_session *session)
946{
947 if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
948 return 0;
949 session->s_state = CEPH_MDS_SESSION_CLOSING;
950 return request_close_session(mdsc, session);
951}
952
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped to.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	/* quota reached: -1 stops iterate_session_caps */
	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&inode->i_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;	/* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;	/* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap);
	} else {
		/* try to drop referring dentries */
		spin_unlock(&inode->i_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&inode->i_lock);
	return 0;
}
1002
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	/* number of caps over budget; <= 0 means nothing to do */
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
			trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}
	return 0;
}
1024
/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
static int add_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session,
			    int extra)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	int err = -ENOMEM;

	if (extra < 0)
		extra = mdsc->client->mount_args->cap_release_safety;

	spin_lock(&session->s_cap_lock);

	/* count the unused slots of the current partial message */
	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
	}

	/* allocate until we have enough slots; the spinlock is dropped
	 * around each message allocation */
	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   0, 0, NULL);
		if (!msg)
			goto out_unlocked;
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	/* move a non-empty partial message to the ready-to-send list */
	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		if (head->num) {
			dout(" queueing non-full %p (%d)\n", msg,
			     le32_to_cpu(head->num));
			list_move_tail(&msg->list_head,
				       &session->s_cap_releases_done);
			session->s_num_cap_releases -=
				CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
		}
	}
	err = 0;
	spin_unlock(&session->s_cap_lock);
out_unlocked:
	return err;
}
1088
/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	int mds, ret = 1;

	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		if (!session)
			continue;
		/* pin the session; mdsc->mutex is dropped while we hold
		 * the per-session mutex */
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			/* head of s_cap_flushing is the oldest flush */
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&inode->i_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
				ret = 0;
			}
			spin_unlock(&inode->i_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (!ret)
			return ret;	/* mdsc->mutex already released */
		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
	return ret;
}
1138
/*
 * Send every ready (queued-on-s_cap_releases_done) cap release message
 * to the MDS.
 *
 * called under s_mutex
 */
static void send_cap_releases(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	while (1) {
		/* the lock is retaken every pass; the break below exits
		 * with it held, matching the unlock after the loop */
		spin_lock(&session->s_cap_lock);
		if (list_empty(&session->s_cap_releases_done))
			break;
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	spin_unlock(&session->s_cap_lock);
}
1162
1163/*
1164 * requests
1165 */
1166
1167/*
1168 * Create an mds request.
1169 */
1170struct ceph_mds_request *
1171ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1172{
1173 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1174
1175 if (!req)
1176 return ERR_PTR(-ENOMEM);
1177
1178 req->r_started = jiffies;
1179 req->r_resend_mds = -1;
1180 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1181 req->r_fmode = -1;
1182 kref_init(&req->r_kref);
1183 INIT_LIST_HEAD(&req->r_wait);
1184 init_completion(&req->r_completion);
1185 init_completion(&req->r_safe_completion);
1186 INIT_LIST_HEAD(&req->r_unsafe_item);
1187
1188 req->r_op = op;
1189 req->r_direct_mode = mode;
1190 return req;
1191}
1192
1193/*
1194 * return oldest (lowest) request, tid in request tree, 0 if none.
1195 *
1196 * called under mdsc->mutex.
1197 */
1198static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1199{
1200 if (RB_EMPTY_ROOT(&mdsc->request_tree))
1201 return NULL;
1202 return rb_entry(rb_first(&mdsc->request_tree),
1203 struct ceph_mds_request, r_node);
1204}
1205
1206static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1207{
1208 struct ceph_mds_request *req = __get_oldest_req(mdsc);
1209
1210 if (req)
1211 return req->r_tid;
1212 return 0;
1213}
1214
1215/*
1216 * Build a dentry's path. Allocate on heap; caller must kfree. Based
1217 * on build_path_from_dentry in fs/cifs/dir.c.
1218 *
1219 * If @stop_on_nosnap, generate path relative to the first non-snapped
1220 * inode.
1221 *
1222 * Encode hidden .snap dirs as a double /, i.e.
1223 * foo/.snap/bar -> foo//bar
1224 */
1225char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1226 int stop_on_nosnap)
1227{
1228 struct dentry *temp;
1229 char *path;
1230 int len, pos;
1231
1232 if (dentry == NULL)
1233 return ERR_PTR(-EINVAL);
1234
1235retry:
1236 len = 0;
1237 for (temp = dentry; !IS_ROOT(temp);) {
1238 struct inode *inode = temp->d_inode;
1239 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1240 len++; /* slash only */
1241 else if (stop_on_nosnap && inode &&
1242 ceph_snap(inode) == CEPH_NOSNAP)
1243 break;
1244 else
1245 len += 1 + temp->d_name.len;
1246 temp = temp->d_parent;
1247 if (temp == NULL) {
1248 pr_err("build_path_dentry corrupt dentry %p\n", dentry);
1249 return ERR_PTR(-EINVAL);
1250 }
1251 }
1252 if (len)
1253 len--; /* no leading '/' */
1254
1255 path = kmalloc(len+1, GFP_NOFS);
1256 if (path == NULL)
1257 return ERR_PTR(-ENOMEM);
1258 pos = len;
1259 path[pos] = 0; /* trailing null */
1260 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1261 struct inode *inode = temp->d_inode;
1262
1263 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1264 dout("build_path_dentry path+%d: %p SNAPDIR\n",
1265 pos, temp);
1266 } else if (stop_on_nosnap && inode &&
1267 ceph_snap(inode) == CEPH_NOSNAP) {
1268 break;
1269 } else {
1270 pos -= temp->d_name.len;
1271 if (pos < 0)
1272 break;
1273 strncpy(path + pos, temp->d_name.name,
1274 temp->d_name.len);
1275 dout("build_path_dentry path+%d: %p '%.*s'\n",
1276 pos, temp, temp->d_name.len, path + pos);
1277 }
1278 if (pos)
1279 path[--pos] = '/';
1280 temp = temp->d_parent;
1281 if (temp == NULL) {
1282 pr_err("build_path_dentry corrupt dentry\n");
1283 kfree(path);
1284 return ERR_PTR(-EINVAL);
1285 }
1286 }
1287 if (pos != 0) {
1288 pr_err("build_path_dentry did not end path lookup where "
1289 "expected, namelen is %d, pos is %d\n", len, pos);
1290 /* presumably this is only possible if racing with a
1291 rename of one of the parent directories (we can not
1292 lock the dentries above us to prevent this, but
1293 retrying should be harmless) */
1294 kfree(path);
1295 goto retry;
1296 }
1297
1298 *base = ceph_ino(temp->d_inode);
1299 *plen = len;
1300 dout("build_path_dentry on %p %d built %llx '%.*s'\n",
1301 dentry, atomic_read(&dentry->d_count), *base, len, path);
1302 return path;
1303}
1304
1305static int build_dentry_path(struct dentry *dentry,
1306 const char **ppath, int *ppathlen, u64 *pino,
1307 int *pfreepath)
1308{
1309 char *path;
1310
1311 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
1312 *pino = ceph_ino(dentry->d_parent->d_inode);
1313 *ppath = dentry->d_name.name;
1314 *ppathlen = dentry->d_name.len;
1315 return 0;
1316 }
1317 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1318 if (IS_ERR(path))
1319 return PTR_ERR(path);
1320 *ppath = path;
1321 *pfreepath = 1;
1322 return 0;
1323}
1324
1325static int build_inode_path(struct inode *inode,
1326 const char **ppath, int *ppathlen, u64 *pino,
1327 int *pfreepath)
1328{
1329 struct dentry *dentry;
1330 char *path;
1331
1332 if (ceph_snap(inode) == CEPH_NOSNAP) {
1333 *pino = ceph_ino(inode);
1334 *ppathlen = 0;
1335 return 0;
1336 }
1337 dentry = d_find_alias(inode);
1338 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1339 dput(dentry);
1340 if (IS_ERR(path))
1341 return PTR_ERR(path);
1342 *ppath = path;
1343 *pfreepath = 1;
1344 return 0;
1345}
1346
1347/*
1348 * request arguments may be specified via an inode *, a dentry *, or
1349 * an explicit ino+path.
1350 */
1351static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1352 const char *rpath, u64 rino,
1353 const char **ppath, int *pathlen,
1354 u64 *ino, int *freepath)
1355{
1356 int r = 0;
1357
1358 if (rinode) {
1359 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1360 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1361 ceph_snap(rinode));
1362 } else if (rdentry) {
1363 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1364 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1365 *ppath);
1366 } else if (rpath) {
1367 *ino = rino;
1368 *ppath = rpath;
1369 *pathlen = strlen(rpath);
1370 dout(" path %.*s\n", *pathlen, rpath);
1371 }
1372
1373 return r;
1374}
1375
1376/*
1377 * called under mdsc->mutex
1378 */
1379static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1380 struct ceph_mds_request *req,
1381 int mds)
1382{
1383 struct ceph_msg *msg;
1384 struct ceph_mds_request_head *head;
1385 const char *path1 = NULL;
1386 const char *path2 = NULL;
1387 u64 ino1 = 0, ino2 = 0;
1388 int pathlen1 = 0, pathlen2 = 0;
1389 int freepath1 = 0, freepath2 = 0;
1390 int len;
1391 u16 releases;
1392 void *p, *end;
1393 int ret;
1394
1395 ret = set_request_path_attr(req->r_inode, req->r_dentry,
1396 req->r_path1, req->r_ino1.ino,
1397 &path1, &pathlen1, &ino1, &freepath1);
1398 if (ret < 0) {
1399 msg = ERR_PTR(ret);
1400 goto out;
1401 }
1402
1403 ret = set_request_path_attr(NULL, req->r_old_dentry,
1404 req->r_path2, req->r_ino2.ino,
1405 &path2, &pathlen2, &ino2, &freepath2);
1406 if (ret < 0) {
1407 msg = ERR_PTR(ret);
1408 goto out_free1;
1409 }
1410
1411 len = sizeof(*head) +
1412 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));
1413
1414 /* calculate (max) length for cap releases */
1415 len += sizeof(struct ceph_mds_request_release) *
1416 (!!req->r_inode_drop + !!req->r_dentry_drop +
1417 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1418 if (req->r_dentry_drop)
1419 len += req->r_dentry->d_name.len;
1420 if (req->r_old_dentry_drop)
1421 len += req->r_old_dentry->d_name.len;
1422
1423 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
1424 if (IS_ERR(msg))
1425 goto out_free2;
1426
1427 msg->hdr.tid = cpu_to_le64(req->r_tid);
1428
1429 head = msg->front.iov_base;
1430 p = msg->front.iov_base + sizeof(*head);
1431 end = msg->front.iov_base + msg->front.iov_len;
1432
1433 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1434 head->op = cpu_to_le32(req->r_op);
1435 head->caller_uid = cpu_to_le32(current_fsuid());
1436 head->caller_gid = cpu_to_le32(current_fsgid());
1437 head->args = req->r_args;
1438
1439 ceph_encode_filepath(&p, end, ino1, path1);
1440 ceph_encode_filepath(&p, end, ino2, path2);
1441
1442 /* cap releases */
1443 releases = 0;
1444 if (req->r_inode_drop)
1445 releases += ceph_encode_inode_release(&p,
1446 req->r_inode ? req->r_inode : req->r_dentry->d_inode,
1447 mds, req->r_inode_drop, req->r_inode_unless, 0);
1448 if (req->r_dentry_drop)
1449 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1450 mds, req->r_dentry_drop, req->r_dentry_unless);
1451 if (req->r_old_dentry_drop)
1452 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1453 mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1454 if (req->r_old_inode_drop)
1455 releases += ceph_encode_inode_release(&p,
1456 req->r_old_dentry->d_inode,
1457 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1458 head->num_releases = cpu_to_le16(releases);
1459
1460 BUG_ON(p > end);
1461 msg->front.iov_len = p - msg->front.iov_base;
1462 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1463
1464 msg->pages = req->r_pages;
1465 msg->nr_pages = req->r_num_pages;
1466 msg->hdr.data_len = cpu_to_le32(req->r_data_len);
1467 msg->hdr.data_off = cpu_to_le16(0);
1468
1469out_free2:
1470 if (freepath2)
1471 kfree((char *)path2);
1472out_free1:
1473 if (freepath1)
1474 kfree((char *)path1);
1475out:
1476 return msg;
1477}
1478
1479/*
1480 * called under mdsc->mutex if error, under no mutex if
1481 * success.
1482 */
1483static void complete_request(struct ceph_mds_client *mdsc,
1484 struct ceph_mds_request *req)
1485{
1486 if (req->r_callback)
1487 req->r_callback(mdsc, req);
1488 else
1489 complete(&req->r_completion);
1490}
1491
1492/*
1493 * called under mdsc->mutex
1494 */
1495static int __prepare_send_request(struct ceph_mds_client *mdsc,
1496 struct ceph_mds_request *req,
1497 int mds)
1498{
1499 struct ceph_mds_request_head *rhead;
1500 struct ceph_msg *msg;
1501 int flags = 0;
1502
1503 req->r_mds = mds;
1504 req->r_attempts++;
1505 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
1506 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
1507
1508 if (req->r_request) {
1509 ceph_msg_put(req->r_request);
1510 req->r_request = NULL;
1511 }
1512 msg = create_request_message(mdsc, req, mds);
1513 if (IS_ERR(msg)) {
1514 req->r_reply = ERR_PTR(PTR_ERR(msg));
1515 complete_request(mdsc, req);
1516 return -PTR_ERR(msg);
1517 }
1518 req->r_request = msg;
1519
1520 rhead = msg->front.iov_base;
1521 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
1522 if (req->r_got_unsafe)
1523 flags |= CEPH_MDS_FLAG_REPLAY;
1524 if (req->r_locked_dir)
1525 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
1526 rhead->flags = cpu_to_le32(flags);
1527 rhead->num_fwd = req->r_num_fwd;
1528 rhead->num_retry = req->r_attempts - 1;
1529
1530 dout(" r_locked_dir = %p\n", req->r_locked_dir);
1531
1532 if (req->r_target_inode && req->r_got_unsafe)
1533 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
1534 else
1535 rhead->ino = 0;
1536 return 0;
1537}
1538
1539/*
1540 * send request, or put it on the appropriate wait list.
1541 */
1542static int __do_request(struct ceph_mds_client *mdsc,
1543 struct ceph_mds_request *req)
1544{
1545 struct ceph_mds_session *session = NULL;
1546 int mds = -1;
1547 int err = -EAGAIN;
1548
1549 if (req->r_reply)
1550 goto out;
1551
1552 if (req->r_timeout &&
1553 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
1554 dout("do_request timed out\n");
1555 err = -EIO;
1556 goto finish;
1557 }
1558
1559 mds = __choose_mds(mdsc, req);
1560 if (mds < 0 ||
1561 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
1562 dout("do_request no mds or not active, waiting for map\n");
1563 list_add(&req->r_wait, &mdsc->waiting_for_map);
1564 goto out;
1565 }
1566
1567 /* get, open session */
1568 session = __ceph_lookup_mds_session(mdsc, mds);
1569 if (!session)
1570 session = register_session(mdsc, mds);
1571 dout("do_request mds%d session %p state %s\n", mds, session,
1572 session_state_name(session->s_state));
1573 if (session->s_state != CEPH_MDS_SESSION_OPEN &&
1574 session->s_state != CEPH_MDS_SESSION_HUNG) {
1575 if (session->s_state == CEPH_MDS_SESSION_NEW ||
1576 session->s_state == CEPH_MDS_SESSION_CLOSING)
1577 __open_session(mdsc, session);
1578 list_add(&req->r_wait, &session->s_waiting);
1579 goto out_session;
1580 }
1581
1582 /* send request */
1583 req->r_session = get_session(session);
1584 req->r_resend_mds = -1; /* forget any previous mds hint */
1585
1586 if (req->r_request_started == 0) /* note request start time */
1587 req->r_request_started = jiffies;
1588
1589 err = __prepare_send_request(mdsc, req, mds);
1590 if (!err) {
1591 ceph_msg_get(req->r_request);
1592 ceph_con_send(&session->s_con, req->r_request);
1593 }
1594
1595out_session:
1596 ceph_put_mds_session(session);
1597out:
1598 return err;
1599
1600finish:
1601 req->r_reply = ERR_PTR(err);
1602 complete_request(mdsc, req);
1603 goto out;
1604}
1605
1606/*
1607 * called under mdsc->mutex
1608 */
1609static void __wake_requests(struct ceph_mds_client *mdsc,
1610 struct list_head *head)
1611{
1612 struct ceph_mds_request *req, *nreq;
1613
1614 list_for_each_entry_safe(req, nreq, head, r_wait) {
1615 list_del_init(&req->r_wait);
1616 __do_request(mdsc, req);
1617 }
1618}
1619
1620/*
1621 * Wake up threads with requests pending for @mds, so that they can
1622 * resubmit their requests to a possibly different mds. If @all is set,
1623 * wake up if their requests has been forwarded to @mds, too.
1624 */
1625static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
1626{
1627 struct ceph_mds_request *req;
1628 struct rb_node *p;
1629
1630 dout("kick_requests mds%d\n", mds);
1631 for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
1632 req = rb_entry(p, struct ceph_mds_request, r_node);
1633 if (req->r_got_unsafe)
1634 continue;
1635 if (req->r_session &&
1636 req->r_session->s_mds == mds) {
1637 dout(" kicking tid %llu\n", req->r_tid);
1638 put_request_session(req);
1639 __do_request(mdsc, req);
1640 }
1641 }
1642}
1643
/*
 * Register and (asynchronously) submit @req; the caller is notified
 * via r_callback / r_completion when the reply arrives.
 */
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
1653
1654/*
1655 * Synchrously perform an mds request. Take care of all of the
1656 * session setup, forwarding, retry details.
1657 */
1658int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
1659 struct inode *dir,
1660 struct ceph_mds_request *req)
1661{
1662 int err;
1663
1664 dout("do_request on %p\n", req);
1665
1666 /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
1667 if (req->r_inode)
1668 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
1669 if (req->r_locked_dir)
1670 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
1671 if (req->r_old_dentry)
1672 ceph_get_cap_refs(
1673 ceph_inode(req->r_old_dentry->d_parent->d_inode),
1674 CEPH_CAP_PIN);
1675
1676 /* issue */
1677 mutex_lock(&mdsc->mutex);
1678 __register_request(mdsc, req, dir);
1679 __do_request(mdsc, req);
1680
1681 /* wait */
1682 if (!req->r_reply) {
1683 mutex_unlock(&mdsc->mutex);
1684 if (req->r_timeout) {
1685 err = (long)wait_for_completion_interruptible_timeout(
1686 &req->r_completion, req->r_timeout);
1687 if (err == 0)
1688 req->r_reply = ERR_PTR(-EIO);
1689 else if (err < 0)
1690 req->r_reply = ERR_PTR(err);
1691 } else {
1692 err = wait_for_completion_interruptible(
1693 &req->r_completion);
1694 if (err)
1695 req->r_reply = ERR_PTR(err);
1696 }
1697 mutex_lock(&mdsc->mutex);
1698 }
1699
1700 if (IS_ERR(req->r_reply)) {
1701 err = PTR_ERR(req->r_reply);
1702 req->r_reply = NULL;
1703
1704 if (err == -ERESTARTSYS) {
1705 /* aborted */
1706 req->r_aborted = true;
1707
1708 if (req->r_locked_dir &&
1709 (req->r_op & CEPH_MDS_OP_WRITE)) {
1710 struct ceph_inode_info *ci =
1711 ceph_inode(req->r_locked_dir);
1712
1713 dout("aborted, clearing I_COMPLETE on %p\n",
1714 req->r_locked_dir);
1715 spin_lock(&req->r_locked_dir->i_lock);
1716 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1717 ci->i_release_count++;
1718 spin_unlock(&req->r_locked_dir->i_lock);
1719 }
1720 } else {
1721 /* clean up this request */
1722 __unregister_request(mdsc, req);
1723 if (!list_empty(&req->r_unsafe_item))
1724 list_del_init(&req->r_unsafe_item);
1725 complete(&req->r_safe_completion);
1726 }
1727 } else if (req->r_err) {
1728 err = req->r_err;
1729 } else {
1730 err = le32_to_cpu(req->r_reply_info.head->result);
1731 }
1732 mutex_unlock(&mdsc->mutex);
1733
1734 dout("do_request %p done, result %d\n", req, err);
1735 return err;
1736}
1737
1738/*
1739 * Handle mds reply.
1740 *
1741 * We take the session mutex and parse and process the reply immediately.
1742 * This preserves the logical ordering of replies, capabilities, etc., sent
1743 * by the MDS as they are applied to our local cache.
1744 */
1745static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
1746{
1747 struct ceph_mds_client *mdsc = session->s_mdsc;
1748 struct ceph_mds_request *req;
1749 struct ceph_mds_reply_head *head = msg->front.iov_base;
1750 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
1751 u64 tid;
1752 int err, result;
1753 int mds = session->s_mds;
1754
1755 if (msg->front.iov_len < sizeof(*head)) {
1756 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
1757 ceph_msg_dump(msg);
1758 return;
1759 }
1760
1761 /* get request, session */
1762 tid = le64_to_cpu(msg->hdr.tid);
1763 mutex_lock(&mdsc->mutex);
1764 req = __lookup_request(mdsc, tid);
1765 if (!req) {
1766 dout("handle_reply on unknown tid %llu\n", tid);
1767 mutex_unlock(&mdsc->mutex);
1768 return;
1769 }
1770 dout("handle_reply %p\n", req);
1771
1772 /* correct session? */
1773 if (!req->r_session && req->r_session != session) {
1774 pr_err("mdsc_handle_reply got %llu on session mds%d"
1775 " not mds%d\n", tid, session->s_mds,
1776 req->r_session ? req->r_session->s_mds : -1);
1777 mutex_unlock(&mdsc->mutex);
1778 goto out;
1779 }
1780
1781 /* dup? */
1782 if ((req->r_got_unsafe && !head->safe) ||
1783 (req->r_got_safe && head->safe)) {
1784 pr_warning("got a dup %s reply on %llu from mds%d\n",
1785 head->safe ? "safe" : "unsafe", tid, mds);
1786 mutex_unlock(&mdsc->mutex);
1787 goto out;
1788 }
1789
1790 result = le32_to_cpu(head->result);
1791
1792 /*
1793 * Tolerate 2 consecutive ESTALEs from the same mds.
1794 * FIXME: we should be looking at the cap migrate_seq.
1795 */
1796 if (result == -ESTALE) {
1797 req->r_direct_mode = USE_AUTH_MDS;
1798 req->r_num_stale++;
1799 if (req->r_num_stale <= 2) {
1800 __do_request(mdsc, req);
1801 mutex_unlock(&mdsc->mutex);
1802 goto out;
1803 }
1804 } else {
1805 req->r_num_stale = 0;
1806 }
1807
1808 if (head->safe) {
1809 req->r_got_safe = true;
1810 __unregister_request(mdsc, req);
1811 complete(&req->r_safe_completion);
1812
1813 if (req->r_got_unsafe) {
1814 /*
1815 * We already handled the unsafe response, now do the
1816 * cleanup. No need to examine the response; the MDS
1817 * doesn't include any result info in the safe
1818 * response. And even if it did, there is nothing
1819 * useful we could do with a revised return value.
1820 */
1821 dout("got safe reply %llu, mds%d\n", tid, mds);
1822 list_del_init(&req->r_unsafe_item);
1823
1824 /* last unsafe request during umount? */
1825 if (mdsc->stopping && !__get_oldest_req(mdsc))
1826 complete(&mdsc->safe_umount_waiters);
1827 mutex_unlock(&mdsc->mutex);
1828 goto out;
1829 }
1830 }
1831
1832 BUG_ON(req->r_reply);
1833
1834 if (!head->safe) {
1835 req->r_got_unsafe = true;
1836 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
1837 }
1838
1839 dout("handle_reply tid %lld result %d\n", tid, result);
1840 rinfo = &req->r_reply_info;
1841 err = parse_reply_info(msg, rinfo);
1842 mutex_unlock(&mdsc->mutex);
1843
1844 mutex_lock(&session->s_mutex);
1845 if (err < 0) {
1846 pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
1847 ceph_msg_dump(msg);
1848 goto out_err;
1849 }
1850
1851 /* snap trace */
1852 if (rinfo->snapblob_len) {
1853 down_write(&mdsc->snap_rwsem);
1854 ceph_update_snap_trace(mdsc, rinfo->snapblob,
1855 rinfo->snapblob + rinfo->snapblob_len,
1856 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
1857 downgrade_write(&mdsc->snap_rwsem);
1858 } else {
1859 down_read(&mdsc->snap_rwsem);
1860 }
1861
1862 /* insert trace into our cache */
1863 err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
1864 if (err == 0) {
1865 if (result == 0 && rinfo->dir_nr)
1866 ceph_readdir_prepopulate(req, req->r_session);
1867 ceph_unreserve_caps(&req->r_caps_reservation);
1868 }
1869
1870 up_read(&mdsc->snap_rwsem);
1871out_err:
1872 if (err) {
1873 req->r_err = err;
1874 } else {
1875 req->r_reply = msg;
1876 ceph_msg_get(msg);
1877 }
1878
1879 add_cap_releases(mdsc, req->r_session, -1);
1880 mutex_unlock(&session->s_mutex);
1881
1882 /* kick calling process */
1883 complete_request(mdsc, req);
1884out:
1885 ceph_mdsc_put_request(req);
1886 return;
1887}
1888
1889
1890
1891/*
1892 * handle mds notification that our request has been forwarded.
1893 */
1894static void handle_forward(struct ceph_mds_client *mdsc,
1895 struct ceph_mds_session *session,
1896 struct ceph_msg *msg)
1897{
1898 struct ceph_mds_request *req;
1899 u64 tid = le64_to_cpu(msg->hdr.tid);
1900 u32 next_mds;
1901 u32 fwd_seq;
1902 int err = -EINVAL;
1903 void *p = msg->front.iov_base;
1904 void *end = p + msg->front.iov_len;
1905
1906 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
1907 next_mds = ceph_decode_32(&p);
1908 fwd_seq = ceph_decode_32(&p);
1909
1910 mutex_lock(&mdsc->mutex);
1911 req = __lookup_request(mdsc, tid);
1912 if (!req) {
1913 dout("forward %llu to mds%d - req dne\n", tid, next_mds);
1914 goto out; /* dup reply? */
1915 }
1916
1917 if (fwd_seq <= req->r_num_fwd) {
1918 dout("forward %llu to mds%d - old seq %d <= %d\n",
1919 tid, next_mds, req->r_num_fwd, fwd_seq);
1920 } else {
1921 /* resend. forward race not possible; mds would drop */
1922 dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
1923 req->r_num_fwd = fwd_seq;
1924 req->r_resend_mds = next_mds;
1925 put_request_session(req);
1926 __do_request(mdsc, req);
1927 }
1928 ceph_mdsc_put_request(req);
1929out:
1930 mutex_unlock(&mdsc->mutex);
1931 return;
1932
1933bad:
1934 pr_err("mdsc_handle_forward decode error err=%d\n", err);
1935}
1936
1937/*
1938 * handle a mds session control message
1939 */
1940static void handle_session(struct ceph_mds_session *session,
1941 struct ceph_msg *msg)
1942{
1943 struct ceph_mds_client *mdsc = session->s_mdsc;
1944 u32 op;
1945 u64 seq;
1946 int mds = session->s_mds;
1947 struct ceph_mds_session_head *h = msg->front.iov_base;
1948 int wake = 0;
1949
1950 /* decode */
1951 if (msg->front.iov_len != sizeof(*h))
1952 goto bad;
1953 op = le32_to_cpu(h->op);
1954 seq = le64_to_cpu(h->seq);
1955
1956 mutex_lock(&mdsc->mutex);
1957 if (op == CEPH_SESSION_CLOSE)
1958 __unregister_session(mdsc, session);
1959 /* FIXME: this ttl calculation is generous */
1960 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
1961 mutex_unlock(&mdsc->mutex);
1962
1963 mutex_lock(&session->s_mutex);
1964
1965 dout("handle_session mds%d %s %p state %s seq %llu\n",
1966 mds, ceph_session_op_name(op), session,
1967 session_state_name(session->s_state), seq);
1968
1969 if (session->s_state == CEPH_MDS_SESSION_HUNG) {
1970 session->s_state = CEPH_MDS_SESSION_OPEN;
1971 pr_info("mds%d came back\n", session->s_mds);
1972 }
1973
1974 switch (op) {
1975 case CEPH_SESSION_OPEN:
1976 session->s_state = CEPH_MDS_SESSION_OPEN;
1977 renewed_caps(mdsc, session, 0);
1978 wake = 1;
1979 if (mdsc->stopping)
1980 __close_session(mdsc, session);
1981 break;
1982
1983 case CEPH_SESSION_RENEWCAPS:
1984 if (session->s_renew_seq == seq)
1985 renewed_caps(mdsc, session, 1);
1986 break;
1987
1988 case CEPH_SESSION_CLOSE:
1989 remove_session_caps(session);
1990 wake = 1; /* for good measure */
1991 complete(&mdsc->session_close_waiters);
1992 kick_requests(mdsc, mds, 0); /* cur only */
1993 break;
1994
1995 case CEPH_SESSION_STALE:
1996 pr_info("mds%d caps went stale, renewing\n",
1997 session->s_mds);
1998 spin_lock(&session->s_cap_lock);
1999 session->s_cap_gen++;
2000 session->s_cap_ttl = 0;
2001 spin_unlock(&session->s_cap_lock);
2002 send_renew_caps(mdsc, session);
2003 break;
2004
2005 case CEPH_SESSION_RECALL_STATE:
2006 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2007 break;
2008
2009 default:
2010 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2011 WARN_ON(1);
2012 }
2013
2014 mutex_unlock(&session->s_mutex);
2015 if (wake) {
2016 mutex_lock(&mdsc->mutex);
2017 __wake_requests(mdsc, &session->s_waiting);
2018 mutex_unlock(&mdsc->mutex);
2019 }
2020 return;
2021
2022bad:
2023 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2024 (int)msg->front.iov_len);
2025 ceph_msg_dump(msg);
2026 return;
2027}
2028
2029
2030/*
2031 * called under session->mutex.
2032 */
2033static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2034 struct ceph_mds_session *session)
2035{
2036 struct ceph_mds_request *req, *nreq;
2037 int err;
2038
2039 dout("replay_unsafe_requests mds%d\n", session->s_mds);
2040
2041 mutex_lock(&mdsc->mutex);
2042 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2043 err = __prepare_send_request(mdsc, req, session->s_mds);
2044 if (!err) {
2045 ceph_msg_get(req->r_request);
2046 ceph_con_send(&session->s_con, req->r_request);
2047 }
2048 }
2049 mutex_unlock(&mdsc->mutex);
2050}
2051
2052/*
2053 * Encode information about a cap for a reconnect with the MDS.
2054 */
2055static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2056 void *arg)
2057{
2058 struct ceph_mds_cap_reconnect rec;
2059 struct ceph_inode_info *ci;
2060 struct ceph_pagelist *pagelist = arg;
2061 char *path;
2062 int pathlen, err;
2063 u64 pathbase;
2064 struct dentry *dentry;
2065
2066 ci = cap->ci;
2067
2068 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2069 inode, ceph_vinop(inode), cap, cap->cap_id,
2070 ceph_cap_string(cap->issued));
2071 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2072 if (err)
2073 return err;
2074
2075 dentry = d_find_alias(inode);
2076 if (dentry) {
2077 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2078 if (IS_ERR(path)) {
2079 err = PTR_ERR(path);
2080 BUG_ON(err);
2081 }
2082 } else {
2083 path = NULL;
2084 pathlen = 0;
2085 }
2086 err = ceph_pagelist_encode_string(pagelist, path, pathlen);
2087 if (err)
2088 goto out;
2089
2090 spin_lock(&inode->i_lock);
2091 cap->seq = 0; /* reset cap seq */
2092 cap->issue_seq = 0; /* and issue_seq */
2093 rec.cap_id = cpu_to_le64(cap->cap_id);
2094 rec.pathbase = cpu_to_le64(pathbase);
2095 rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2096 rec.issued = cpu_to_le32(cap->issued);
2097 rec.size = cpu_to_le64(inode->i_size);
2098 ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
2099 ceph_encode_timespec(&rec.atime, &inode->i_atime);
2100 rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2101 spin_unlock(&inode->i_lock);
2102
2103 err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
2104
2105out:
2106 kfree(path);
2107 dput(dentry);
2108 return err;
2109}
2110
2111
2112/*
2113 * If an MDS fails and recovers, clients need to reconnect in order to
2114 * reestablish shared state. This includes all caps issued through
2115 * this session _and_ the snap_realm hierarchy. Because it's not
2116 * clear which snap realms the mds cares about, we send everything we
2117 * know about.. that ensures we'll then get any new info the
2118 * recovering MDS might have.
2119 *
2120 * This is a relatively heavyweight operation, but it's rare.
2121 *
2122 * called with mdsc->mutex held.
2123 */
2124static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
2125{
2126 struct ceph_mds_session *session = NULL;
2127 struct ceph_msg *reply;
2128 struct rb_node *p;
2129 int err;
2130 struct ceph_pagelist *pagelist;
2131
2132 pr_info("reconnect to recovering mds%d\n", mds);
2133
2134 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2135 if (!pagelist)
2136 goto fail_nopagelist;
2137 ceph_pagelist_init(pagelist);
2138
2139 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
2140 if (IS_ERR(reply)) {
2141 err = PTR_ERR(reply);
2142 goto fail_nomsg;
2143 }
2144
2145 /* find session */
2146 session = __ceph_lookup_mds_session(mdsc, mds);
2147 mutex_unlock(&mdsc->mutex); /* drop lock for duration */
2148
2149 if (session) {
2150 mutex_lock(&session->s_mutex);
2151
2152 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2153 session->s_seq = 0;
2154
2155 ceph_con_open(&session->s_con,
2156 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2157
2158 /* replay unsafe requests */
2159 replay_unsafe_requests(mdsc, session);
2160 } else {
2161 dout("no session for mds%d, will send short reconnect\n",
2162 mds);
2163 }
2164
2165 down_read(&mdsc->snap_rwsem);
2166
2167 if (!session)
2168 goto send;
2169 dout("session %p state %s\n", session,
2170 session_state_name(session->s_state));
2171
2172 /* traverse this session's caps */
2173 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
2174 if (err)
2175 goto fail;
2176 err = iterate_session_caps(session, encode_caps_cb, pagelist);
2177 if (err < 0)
2178 goto out;
2179
2180 /*
2181 * snaprealms. we provide mds with the ino, seq (version), and
2182 * parent for all of our realms. If the mds has any newer info,
2183 * it will tell us.
2184 */
2185 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
2186 struct ceph_snap_realm *realm =
2187 rb_entry(p, struct ceph_snap_realm, node);
2188 struct ceph_mds_snaprealm_reconnect sr_rec;
2189
2190 dout(" adding snap realm %llx seq %lld parent %llx\n",
2191 realm->ino, realm->seq, realm->parent_ino);
2192 sr_rec.ino = cpu_to_le64(realm->ino);
2193 sr_rec.seq = cpu_to_le64(realm->seq);
2194 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2195 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2196 if (err)
2197 goto fail;
2198 }
2199
2200send:
2201 reply->pagelist = pagelist;
2202 reply->hdr.data_len = cpu_to_le32(pagelist->length);
2203 reply->nr_pages = calc_pages_for(0, pagelist->length);
2204 ceph_con_send(&session->s_con, reply);
2205
2206 if (session) {
2207 session->s_state = CEPH_MDS_SESSION_OPEN;
2208 __wake_requests(mdsc, &session->s_waiting);
2209 }
2210
2211out:
2212 up_read(&mdsc->snap_rwsem);
2213 if (session) {
2214 mutex_unlock(&session->s_mutex);
2215 ceph_put_mds_session(session);
2216 }
2217 mutex_lock(&mdsc->mutex);
2218 return;
2219
2220fail:
2221 ceph_msg_put(reply);
2222fail_nomsg:
2223 ceph_pagelist_release(pagelist);
2224 kfree(pagelist);
2225fail_nopagelist:
2226 pr_err("ENOMEM preparing reconnect for mds%d\n", mds);
2227 goto out;
2228}
2229
2230
2231/*
2232 * compare old and new mdsmaps, kicking requests
2233 * and closing out old connections as necessary
2234 *
2235 * called under mdsc->mutex.
2236 */
2237static void check_new_map(struct ceph_mds_client *mdsc,
2238 struct ceph_mdsmap *newmap,
2239 struct ceph_mdsmap *oldmap)
2240{
2241 int i;
2242 int oldstate, newstate;
2243 struct ceph_mds_session *s;
2244
2245 dout("check_new_map new %u old %u\n",
2246 newmap->m_epoch, oldmap->m_epoch);
2247
2248 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
2249 if (mdsc->sessions[i] == NULL)
2250 continue;
2251 s = mdsc->sessions[i];
2252 oldstate = ceph_mdsmap_get_state(oldmap, i);
2253 newstate = ceph_mdsmap_get_state(newmap, i);
2254
2255 dout("check_new_map mds%d state %s -> %s (session %s)\n",
2256 i, ceph_mds_state_name(oldstate),
2257 ceph_mds_state_name(newstate),
2258 session_state_name(s->s_state));
2259
2260 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
2261 ceph_mdsmap_get_addr(newmap, i),
2262 sizeof(struct ceph_entity_addr))) {
2263 if (s->s_state == CEPH_MDS_SESSION_OPENING) {
2264 /* the session never opened, just close it
2265 * out now */
2266 __wake_requests(mdsc, &s->s_waiting);
2267 __unregister_session(mdsc, s);
2268 } else {
2269 /* just close it */
2270 mutex_unlock(&mdsc->mutex);
2271 mutex_lock(&s->s_mutex);
2272 mutex_lock(&mdsc->mutex);
2273 ceph_con_close(&s->s_con);
2274 mutex_unlock(&s->s_mutex);
2275 s->s_state = CEPH_MDS_SESSION_RESTARTING;
2276 }
2277
2278 /* kick any requests waiting on the recovering mds */
2279 kick_requests(mdsc, i, 1);
2280 } else if (oldstate == newstate) {
2281 continue; /* nothing new with this mds */
2282 }
2283
2284 /*
2285 * send reconnect?
2286 */
2287 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
2288 newstate >= CEPH_MDS_STATE_RECONNECT)
2289 send_mds_reconnect(mdsc, i);
2290
2291 /*
2292 * kick requests on any mds that has gone active.
2293 *
2294 * kick requests on cur or forwarder: we may have sent
2295 * the request to mds1, mds1 told us it forwarded it
2296 * to mds2, but then we learn mds1 failed and can't be
2297 * sure it successfully forwarded our request before
2298 * it died.
2299 */
2300 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
2301 newstate >= CEPH_MDS_STATE_ACTIVE) {
2302 pr_info("mds%d reconnect completed\n", s->s_mds);
2303 kick_requests(mdsc, i, 1);
2304 ceph_kick_flushing_caps(mdsc, s);
2305 wake_up_session_caps(s, 1);
2306 }
2307 }
2308}
2309
2310
2311
2312/*
2313 * leases
2314 */
2315
2316/*
2317 * caller must hold session s_mutex, dentry->d_lock
2318 */
2319void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
2320{
2321 struct ceph_dentry_info *di = ceph_dentry(dentry);
2322
2323 ceph_put_mds_session(di->lease_session);
2324 di->lease_session = NULL;
2325}
2326
/*
 * Handle an incoming CEPH_MSG_CLIENT_LEASE message from the mds:
 * revoke or renew a dentry lease, acking revocations by reusing the
 * incoming message buffer.
 */
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	struct ceph_vino vino;
	int mask;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode: fixed header followed by a length-prefixed dentry name */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	mask = le16_to_cpu(h->mask);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease '%s', mask %d, ino %llx %p\n",
	     ceph_lease_op_name(h->action), mask, vino.ino, inode);
	if (inode == NULL) {
		/* no such inode locally; ack the revoke anyway */
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}
	ci = ceph_inode(inode);

	/* dentry: locate via any alias of the (directory) inode */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		/* drop our lease (if it came from this session) and ack */
		if (di && di->lease_session == session) {
			h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		/* complete a renew we initiated (lease_renew_from set,
		 * lease_renew_after cleared while the renew was pending) */
		if (di && di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = le32_to_cpu(h->seq);
			dentry->d_time = di->lease_renew_from + duration;
			/* schedule the next renew at half the lease term */
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;
	/* fall through to send the revoke ack */

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	/* inode may be NULL here (no-inode release path); iput handles it */
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
2430
2431void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
2432 struct inode *inode,
2433 struct dentry *dentry, char action,
2434 u32 seq)
2435{
2436 struct ceph_msg *msg;
2437 struct ceph_mds_lease *lease;
2438 int len = sizeof(*lease) + sizeof(u32);
2439 int dnamelen = 0;
2440
2441 dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
2442 inode, dentry, ceph_lease_op_name(action), session->s_mds);
2443 dnamelen = dentry->d_name.len;
2444 len += dnamelen;
2445
2446 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
2447 if (IS_ERR(msg))
2448 return;
2449 lease = msg->front.iov_base;
2450 lease->action = action;
2451 lease->mask = cpu_to_le16(CEPH_LOCK_DN);
2452 lease->ino = cpu_to_le64(ceph_vino(inode).ino);
2453 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
2454 lease->seq = cpu_to_le32(seq);
2455 put_unaligned_le32(dnamelen, lease + 1);
2456 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
2457
2458 /*
2459 * if this is a preemptive lease RELEASE, no need to
2460 * flush request stream, since the actual request will
2461 * soon follow.
2462 */
2463 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
2464
2465 ceph_con_send(&session->s_con, msg);
2466}
2467
2468/*
2469 * Preemptively release a lease we expect to invalidate anyway.
2470 * Pass @inode always, @dentry is optional.
2471 */
2472void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
2473 struct dentry *dentry, int mask)
2474{
2475 struct ceph_dentry_info *di;
2476 struct ceph_mds_session *session;
2477 u32 seq;
2478
2479 BUG_ON(inode == NULL);
2480 BUG_ON(dentry == NULL);
2481 BUG_ON(mask != CEPH_LOCK_DN);
2482
2483 /* is dentry lease valid? */
2484 spin_lock(&dentry->d_lock);
2485 di = ceph_dentry(dentry);
2486 if (!di || !di->lease_session ||
2487 di->lease_session->s_mds < 0 ||
2488 di->lease_gen != di->lease_session->s_cap_gen ||
2489 !time_before(jiffies, dentry->d_time)) {
2490 dout("lease_release inode %p dentry %p -- "
2491 "no lease on %d\n",
2492 inode, dentry, mask);
2493 spin_unlock(&dentry->d_lock);
2494 return;
2495 }
2496
2497 /* we do have a lease on this dentry; note mds and seq */
2498 session = ceph_get_mds_session(di->lease_session);
2499 seq = di->lease_seq;
2500 __ceph_mdsc_drop_dentry_lease(dentry);
2501 spin_unlock(&dentry->d_lock);
2502
2503 dout("lease_release inode %p dentry %p mask %d to mds%d\n",
2504 inode, dentry, mask, session->s_mds);
2505 ceph_mdsc_lease_send_msg(session, inode, dentry,
2506 CEPH_MDS_LEASE_RELEASE, seq);
2507 ceph_put_mds_session(session);
2508}
2509
2510/*
2511 * drop all leases (and dentry refs) in preparation for umount
2512 */
2513static void drop_leases(struct ceph_mds_client *mdsc)
2514{
2515 int i;
2516
2517 dout("drop_leases\n");
2518 mutex_lock(&mdsc->mutex);
2519 for (i = 0; i < mdsc->max_sessions; i++) {
2520 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2521 if (!s)
2522 continue;
2523 mutex_unlock(&mdsc->mutex);
2524 mutex_lock(&s->s_mutex);
2525 mutex_unlock(&s->s_mutex);
2526 ceph_put_mds_session(s);
2527 mutex_lock(&mdsc->mutex);
2528 }
2529 mutex_unlock(&mdsc->mutex);
2530}
2531
2532
2533
2534/*
2535 * delayed work -- periodically trim expired leases, renew caps with mds
2536 */
2537static void schedule_delayed(struct ceph_mds_client *mdsc)
2538{
2539 int delay = 5;
2540 unsigned hz = round_jiffies_relative(HZ * delay);
2541 schedule_delayed_work(&mdsc->delayed_work, hz);
2542}
2543
/*
 * Periodic work: flush delayed caps, renew caps or send keepalives on
 * each open session, resend pending session closes, and push out any
 * batched cap releases.  Assumes mdsc->mdsmap is non-NULL.
 */
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	/* renew caps every quarter of the mds session timeout */
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		/* mark an open session hung once its ttl has expired */
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		/* drop mdsc->mutex before taking the session mutex */
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		add_cap_releases(mdsc, s, -1);
		send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	/* rearm ourselves */
	schedule_delayed(mdsc);
}
2602
2603
2604int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2605{
2606 mdsc->client = client;
2607 mutex_init(&mdsc->mutex);
2608 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2609 init_completion(&mdsc->safe_umount_waiters);
2610 init_completion(&mdsc->session_close_waiters);
2611 INIT_LIST_HEAD(&mdsc->waiting_for_map);
2612 mdsc->sessions = NULL;
2613 mdsc->max_sessions = 0;
2614 mdsc->stopping = 0;
2615 init_rwsem(&mdsc->snap_rwsem);
2616 mdsc->snap_realms = RB_ROOT;
2617 INIT_LIST_HEAD(&mdsc->snap_empty);
2618 spin_lock_init(&mdsc->snap_empty_lock);
2619 mdsc->last_tid = 0;
2620 mdsc->request_tree = RB_ROOT;
2621 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
2622 mdsc->last_renew_caps = jiffies;
2623 INIT_LIST_HEAD(&mdsc->cap_delay_list);
2624 spin_lock_init(&mdsc->cap_delay_lock);
2625 INIT_LIST_HEAD(&mdsc->snap_flush_list);
2626 spin_lock_init(&mdsc->snap_flush_lock);
2627 mdsc->cap_flush_seq = 0;
2628 INIT_LIST_HEAD(&mdsc->cap_dirty);
2629 mdsc->num_cap_flushing = 0;
2630 spin_lock_init(&mdsc->cap_dirty_lock);
2631 init_waitqueue_head(&mdsc->cap_flushing_wq);
2632 spin_lock_init(&mdsc->dentry_lru_lock);
2633 INIT_LIST_HEAD(&mdsc->dentry_lru);
2634 return 0;
2635}
2636
2637/*
2638 * Wait for safe replies on open mds requests. If we time out, drop
2639 * all requests from the tree to avoid dangling dentry refs.
2640 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_client *client = mdsc->client;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		/* wait up to mount_timeout for safe replies */
		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    client->mount_args->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
2665
2666/*
2667 * called before mount is ro, and before dentries are torn down.
2668 * (hmm, does this still race with new lookups?)
2669 */
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	/* flag shutdown first so new work backs off */
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);
}
2679
2680/*
2681 * wait for all write mds requests to flush.
2682 */
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
	/* walk the request tree in tid order up to want_tid */
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op; hold a ref across the unlocked wait */
			ceph_mdsc_get_request(req);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			/* NOTE(review): req may have been unregistered while
			 * the mutex was dropped; rb_next() on its r_node
			 * relies on the node still being walkable here --
			 * confirm against __unregister_request. */
			n = rb_next(&req->r_node);
			ceph_mdsc_put_request(req);
		} else {
			n = rb_next(&req->r_node);
		}
		if (!n)
			break;
		req = rb_entry(n, struct ceph_mds_request, r_node);
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
2712
/*
 * Flush all dirty caps and wait until every write request and cap
 * flush issued up to this point has been acknowledged.
 */
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	dout("sync\n");
	/* snapshot the current high-water marks under the mutex */
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
2729
2730
2731/*
2732 * called after sb is ro.
2733 */
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	int n;
	struct ceph_client *client = mdsc->client;
	unsigned long started, timeout = client->mount_args->mount_timeout * HZ;

	dout("close_sessions\n");

	mutex_lock(&mdsc->mutex);

	/* close sessions: re-request closes until none remain or we
	 * give up after mount_timeout */
	started = jiffies;
	while (time_before(jiffies, started + timeout)) {
		dout("closing sessions\n");
		n = 0;	/* sessions still open this pass */
		for (i = 0; i < mdsc->max_sessions; i++) {
			session = __ceph_lookup_mds_session(mdsc, i);
			if (!session)
				continue;
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			__close_session(mdsc, session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
			n++;
		}
		if (n == 0)
			break;

		if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
			break;

		dout("waiting for sessions to close\n");
		mutex_unlock(&mdsc->mutex);
		wait_for_completion_timeout(&mdsc->session_close_waiters,
					    timeout);
		mutex_lock(&mdsc->mutex);
	}

	/* tear down remaining sessions */
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}

	/* all caps should have been released by now */
	WARN_ON(!list_empty(&mdsc->cap_delay_list));

	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
2800
/*
 * Final teardown of the mds client: stop the periodic work and free
 * the mdsmap and session table.
 */
void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
}
2809
2810
2811/*
2812 * handle mds map update.
2813 */
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	/* decode fsid + epoch + map length; bail on short/foreign message */
	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		/* already have this epoch (or newer); ignore */
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		/* react to state/address changes before destroying oldmap */
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	/* requests parked waiting for a map can proceed now */
	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
2871
2872static struct ceph_connection *con_get(struct ceph_connection *con)
2873{
2874 struct ceph_mds_session *s = con->private;
2875
2876 if (get_session(s)) {
2877 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
2878 return con;
2879 }
2880 dout("mdsc con_get %p FAIL\n", s);
2881 return NULL;
2882}
2883
2884static void con_put(struct ceph_connection *con)
2885{
2886 struct ceph_mds_session *s = con->private;
2887
2888 ceph_put_mds_session(s);
2889 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
2890}
2891
2892/*
2893 * if the client is unresponsive for long enough, the mds will kill
2894 * the session entirely.
2895 */
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	/* reconnect handling is not implemented yet; just report it */
	pr_err("mds%d gave us the boot.  IMPLEMENT RECONNECT.\n",
	       s->s_mds);
}
2903
/*
 * Messenger dispatch: route an incoming message from an mds to the
 * appropriate handler.  Consumes the message reference.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	/* drop messages for sessions no longer in our table */
	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
2947
2948/*
2949 * authentication
2950 */
/*
 * Supply (creating lazily, or recreating if @force_new) the session's
 * MDS authorizer buffers to the messenger.  Returns 0 or a negative
 * error from create_authorizer.
 */
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;
	int ret = 0;

	/* throw away the cached authorizer if the caller wants a fresh one */
	if (force_new && s->s_authorizer) {
		ac->ops->destroy_authorizer(ac, s->s_authorizer);
		s->s_authorizer = NULL;
	}
	if (s->s_authorizer == NULL) {
		/* create_authorizer is optional in the auth ops */
		if (ac->ops->create_authorizer) {
			ret = ac->ops->create_authorizer(
				ac, CEPH_ENTITY_TYPE_MDS,
				&s->s_authorizer,
				&s->s_authorizer_buf,
				&s->s_authorizer_buf_len,
				&s->s_authorizer_reply_buf,
				&s->s_authorizer_reply_buf_len);
			if (ret)
				return ret;
		}
	}

	*proto = ac->protocol;
	*buf = s->s_authorizer_buf;
	*len = s->s_authorizer_buf_len;
	*reply_buf = s->s_authorizer_reply_buf;
	*reply_len = s->s_authorizer_reply_buf_len;
	return 0;
}
2985
2986
/*
 * Verify the mds's reply to our authorizer.
 *
 * NOTE(review): unlike create/invalidate, verify_authorizer_reply is
 * called without a NULL check on the ops pointer -- presumably every
 * auth implementation provides it; confirm.
 */
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;

	return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
}
2995
/*
 * Our authorizer was rejected; invalidate any cached MDS ticket and
 * revalidate auth with the monitors.
 */
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->client->monc.auth;

	/* invalidate_authorizer is optional in the auth ops */
	if (ac->ops->invalidate_authorizer)
		ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->client->monc);
}
3007
3008const static struct ceph_connection_operations mds_con_ops = {
3009 .get = con_get,
3010 .put = con_put,
3011 .dispatch = dispatch,
3012 .get_authorizer = get_authorizer,
3013 .verify_authorizer_reply = verify_authorizer_reply,
3014 .invalidate_authorizer = invalidate_authorizer,
3015 .peer_reset = peer_reset,
3016};
3017
3018
3019
3020
3021/* eof */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
new file mode 100644
index 000000000000..961cc6f65878
--- /dev/null
+++ b/fs/ceph/mds_client.h
@@ -0,0 +1,335 @@
1#ifndef _FS_CEPH_MDS_CLIENT_H
2#define _FS_CEPH_MDS_CLIENT_H
3
4#include <linux/completion.h>
5#include <linux/kref.h>
6#include <linux/list.h>
7#include <linux/mutex.h>
8#include <linux/rbtree.h>
9#include <linux/spinlock.h>
10
11#include "types.h"
12#include "messenger.h"
13#include "mdsmap.h"
14
15/*
16 * Some lock dependencies:
17 *
18 * session->s_mutex
19 * mdsc->mutex
20 *
21 * mdsc->snap_rwsem
22 *
23 * inode->i_lock
24 * mdsc->snap_flush_lock
25 * mdsc->cap_delay_lock
26 *
27 */
28
29struct ceph_client;
30struct ceph_cap;
31
32/*
33 * parsed info about a single inode. pointers are into the encoded
34 * on-wire structures within the mds reply message payload.
35 */
struct ceph_mds_reply_info_in {
	struct ceph_mds_reply_inode *in;	/* on-wire inode record */
	u32 symlink_len;			/* bytes at @symlink */
	char *symlink;				/* symlink target, if any */
	u32 xattr_len;				/* bytes at @xattr_data */
	char *xattr_data;			/* encoded xattr blob */
};
43
44/*
45 * parsed info about an mds reply, including information about the
46 * target inode and/or its parent directory and dentry, and directory
47 * contents (for readdir results).
48 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head    *head;	/* raw reply header */

	/* target inode and its parent directory/dentry, when present */
	struct ceph_mds_reply_info_in diri, targeti;
	struct ceph_mds_reply_dirfrag *dirfrag;
	char                          *dname;
	u32                           dname_len;
	struct ceph_mds_reply_lease   *dlease;

	/* readdir results: parallel arrays of @dir_nr entries */
	struct ceph_mds_reply_dirfrag *dir_dir;
	int                           dir_nr;
	char                          **dir_dname;	/* entry names */
	u32                           *dir_dname_len;	/* name lengths */
	struct ceph_mds_reply_lease   **dir_dlease;	/* per-entry leases */
	struct ceph_mds_reply_info_in *dir_in;		/* per-entry inodes */
	u8                            dir_complete, dir_end;

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};
71
72
73/*
74 * cap releases are batched and sent to the MDS en masse.
75 */
76#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \
77 sizeof(struct ceph_mds_cap_release)) / \
78 sizeof(struct ceph_mds_cap_item))
79
80
81/*
82 * state associated with each MDS<->client session
83 */
/* session lifecycle states (ceph_mds_session.s_state) */
enum {
	CEPH_MDS_SESSION_NEW = 1,	/* allocated, not yet opened */
	CEPH_MDS_SESSION_OPENING = 2,	/* open requested, not yet acked */
	CEPH_MDS_SESSION_OPEN = 3,	/* usable session */
	CEPH_MDS_SESSION_HUNG = 4,	/* s_ttl expired while open */
	CEPH_MDS_SESSION_CLOSING = 5,	/* close requested */
	CEPH_MDS_SESSION_RESTARTING = 6, /* mds addr changed; reconnect due */
	CEPH_MDS_SESSION_RECONNECTING = 7, /* NOTE(review): presumably set
					    * during mds reconnect -- confirm
					    * in send_mds_reconnect */
};
93
struct ceph_mds_session {
	struct ceph_mds_client  *s_mdsc;	/* owning mds client */
	int               s_mds;		/* mds rank, or <0 if none */
	int               s_state;		/* CEPH_MDS_SESSION_* */
	unsigned long     s_ttl;      /* time until mds kills us */
	u64               s_seq;      /* incoming msg seq # */
	struct mutex      s_mutex;    /* serialize session messages */

	struct ceph_connection s_con;	/* messenger connection to the mds */

	/* cached authorizer and its wire buffers */
	struct ceph_authorizer *s_authorizer;
	void             *s_authorizer_buf, *s_authorizer_reply_buf;
	size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;

	/* protected by s_cap_lock */
	spinlock_t        s_cap_lock;
	u32               s_cap_gen;  /* inc each time we get mds stale msg */
	unsigned long     s_cap_ttl;  /* when session caps expire */
	struct list_head  s_caps;     /* all caps issued by this session */
	int               s_nr_caps, s_trim_caps;
	int               s_num_cap_releases;
	struct list_head  s_cap_releases; /* waiting cap_release messages */
	struct list_head  s_cap_releases_done; /* ready to send */
	struct ceph_cap  *s_cap_iterator;

	/* protected by mutex */
	struct list_head  s_cap_flushing;     /* inodes w/ flushing caps */
	struct list_head  s_cap_snaps_flushing;
	unsigned long     s_renew_requested; /* last time we sent a renew req */
	u64               s_renew_seq;

	atomic_t          s_ref;	/* refcount (see ceph_get/put_mds_session) */
	struct list_head  s_waiting;  /* waiting requests */
	struct list_head  s_unsafe;   /* unsafe requests */
};
129
130/*
131 * modes of choosing which MDS to send a request to
132 */
enum {
	USE_ANY_MDS,	/* no preference */
	USE_RANDOM_MDS,	/* pick an up mds at random */
	USE_AUTH_MDS,   /* prefer authoritative mds for this metadata item */
};
138
139struct ceph_mds_request;
140struct ceph_mds_client;
141
142/*
143 * request completion callback
144 */
145typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
146 struct ceph_mds_request *req);
147
148/*
149 * an in-flight mds request
150 */
struct ceph_mds_request {
	u64 r_tid;                   /* transaction id */
	struct rb_node r_node;	     /* node in mdsc->request_tree */

	int r_op;                    /* mds op code */
	int r_mds;		     /* mds this request was sent to */

	/* operation on what? */
	struct inode *r_inode;              /* arg1 */
	struct dentry *r_dentry;            /* arg1 */
	struct dentry *r_old_dentry;        /* arg2: rename from or link from */
	char *r_path1, *r_path2;	    /* string paths, when not dentries */
	struct ceph_vino r_ino1, r_ino2;    /* vino equivalents of the above */

	struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */
	struct inode *r_target_inode;       /* resulting inode */

	union ceph_mds_request_args r_args;	/* op-specific arguments */
	int r_fmode;        /* file mode, if expecting cap */

	/* for choosing which mds to send this request to */
	int r_direct_mode;		/* USE_*_MDS */
	u32 r_direct_hash;      /* choose dir frag based on this dentry hash */
	bool r_direct_is_hash;  /* true if r_direct_hash is valid */

	/* data payload is used for xattr ops */
	struct page **r_pages;
	int r_num_pages;
	int r_data_len;

	/* what caps shall we drop? */
	int r_inode_drop, r_inode_unless;
	int r_dentry_drop, r_dentry_unless;
	int r_old_dentry_drop, r_old_dentry_unless;
	struct inode *r_old_inode;
	int r_old_inode_drop, r_old_inode_unless;

	struct ceph_msg  *r_request;  /* original request */
	struct ceph_msg  *r_reply;    /* reply message, once received */
	struct ceph_mds_reply_info_parsed r_reply_info;	/* decoded reply */
	int r_err;		      /* result code (0 or negative errno) */
	bool r_aborted;		      /* caller gave up on this request */

	unsigned long r_timeout;  /* optional.  jiffies */
	unsigned long r_started;  /* start time to measure timeout against */
	unsigned long r_request_started; /* start time for mds request only,
					    used to measure lease durations */

	/* link unsafe requests to parent directory, for fsync */
	struct inode	*r_unsafe_dir;
	struct list_head r_unsafe_dir_item;

	struct ceph_mds_session *r_session;	/* session request was sent on */

	int               r_attempts;   /* resend attempts */
	int               r_num_fwd;    /* number of forward attempts */
	int               r_num_stale;	/* times we got a stale reply */
	int               r_resend_mds; /* mds to resend to next, if any*/

	struct kref       r_kref;	/* refcount (ceph_mdsc_get/put_request) */
	struct list_head  r_wait;	/* node on a session/mdsc wait list */
	struct completion r_completion;		/* fired on (unsafe) reply */
	struct completion r_safe_completion;	/* fired on safe reply */
	ceph_mds_request_callback_t r_callback;	/* optional completion hook */
	struct list_head  r_unsafe_item;  /* per-session unsafe list item */
	bool		  r_got_unsafe, r_got_safe;	/* which replies seen */

	bool              r_did_prepopulate;	/* readdir cache prefilled */
	u32               r_readdir_offset;	/* readdir position */

	struct ceph_cap_reservation r_caps_reservation;	/* preallocated caps */
	int               r_num_caps;
};
224
225/*
226 * mds client state
227 */
struct ceph_mds_client {
	struct ceph_client      *client;	/* owning client instance */
	struct mutex            mutex;         /* all nested structures */

	struct ceph_mdsmap      *mdsmap;	/* current mds map */
	struct completion       safe_umount_waiters, session_close_waiters;
	struct list_head        waiting_for_map; /* reqs parked for an mdsmap */

	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
	int                     max_sessions;  /* len of s_mds_sessions */
	int                     stopping;      /* true if shutting down */

	/*
	 * snap_rwsem will cover cap linkage into snaprealms, and
	 * realm snap contexts.  (later, we can do per-realm snap
	 * contexts locks..)  the empty list contains realms with no
	 * references (implying they contain no inodes with caps) that
	 * should be destroyed.
	 */
	struct rw_semaphore     snap_rwsem;
	struct rb_root          snap_realms;
	struct list_head        snap_empty;
	spinlock_t              snap_empty_lock;  /* protect snap_empty */

	u64                    last_tid;      /* most recent mds request */
	struct rb_root         request_tree;  /* pending mds requests */
	struct delayed_work    delayed_work;  /* delayed work */
	unsigned long    last_renew_caps;  /* last time we renewed our caps */
	struct list_head cap_delay_list;   /* caps with delayed release */
	spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
	struct list_head snap_flush_list;  /* cap_snaps ready to flush */
	spinlock_t       snap_flush_lock;

	u64               cap_flush_seq;	/* monotonic cap flush counter */
	struct list_head  cap_dirty;        /* inodes with dirty caps */
	int               num_cap_flushing; /* # caps we are flushing */
	spinlock_t        cap_dirty_lock;   /* protects above items */
	wait_queue_head_t cap_flushing_wq;  /* woken as flushes complete */

#ifdef CONFIG_DEBUG_FS
	struct dentry 	  *debugfs_file;	/* debugfs entry */
#endif

	spinlock_t	  dentry_lru_lock;	/* protects dentry_lru */
	struct list_head  dentry_lru;		/* LRU of leased dentries */
	int		  num_dentry;
};
275
276extern const char *ceph_mds_op_name(int op);
277
278extern struct ceph_mds_session *
279__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
280
/* take a reference on a session; pair with ceph_put_mds_session() */
static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
	atomic_inc(&s->s_ref);
	return s;
}
287
288extern void ceph_put_mds_session(struct ceph_mds_session *s);
289
290extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
291 struct ceph_msg *msg, int mds);
292
293extern int ceph_mdsc_init(struct ceph_mds_client *mdsc,
294 struct ceph_client *client);
295extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
296extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc);
297
298extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
299
300extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
301 struct inode *inode,
302 struct dentry *dn, int mask);
303
304extern struct ceph_mds_request *
305ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
306extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
307 struct ceph_mds_request *req);
308extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
309 struct inode *dir,
310 struct ceph_mds_request *req);
/* take a reference on a request; pair with ceph_mdsc_put_request() */
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
	kref_get(&req->r_kref);
}
315extern void ceph_mdsc_release_request(struct kref *kref);
/* drop a request reference; frees via ceph_mdsc_release_request at zero */
static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
	kref_put(&req->r_kref, ceph_mdsc_release_request);
}
320
321extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
322
323extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
324 int stop_on_nosnap);
325
326extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
327extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
328 struct inode *inode,
329 struct dentry *dentry, char action,
330 u32 seq);
331
332extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
333 struct ceph_msg *msg);
334
335#endif
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
new file mode 100644
index 000000000000..c4c498e6dfef
--- /dev/null
+++ b/fs/ceph/mdsmap.c
@@ -0,0 +1,174 @@
1#include "ceph_debug.h"
2
3#include <linux/bug.h>
4#include <linux/err.h>
5#include <linux/random.h>
6#include <linux/slab.h>
7#include <linux/types.h>
8
9#include "mdsmap.h"
10#include "messenger.h"
11#include "decode.h"
12
13#include "super.h"
14
15
16/*
17 * choose a random mds that is "up" (i.e. has a state > 0), or -1.
18 */
19int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
20{
21 int n = 0;
22 int i;
23 char r;
24
25 /* count */
26 for (i = 0; i < m->m_max_mds; i++)
27 if (m->m_info[i].state > 0)
28 n++;
29 if (n == 0)
30 return -1;
31
32 /* pick */
33 get_random_bytes(&r, 1);
34 n = r % n;
35 i = 0;
36 for (i = 0; n > 0; i++, n--)
37 while (m->m_info[i].state <= 0)
38 i++;
39
40 return i;
41}
42
43/*
44 * Decode an MDS map
45 *
46 * Ignore any fields we don't care about (there are quite a few of
47 * them).
48 */
49struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
50{
51 struct ceph_mdsmap *m;
52 const void *start = *p;
53 int i, j, n;
54 int err = -EINVAL;
55 u16 version;
56
57 m = kzalloc(sizeof(*m), GFP_NOFS);
58 if (m == NULL)
59 return ERR_PTR(-ENOMEM);
60
61 ceph_decode_16_safe(p, end, version, bad);
62
63 ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
64 m->m_epoch = ceph_decode_32(p);
65 m->m_client_epoch = ceph_decode_32(p);
66 m->m_last_failure = ceph_decode_32(p);
67 m->m_root = ceph_decode_32(p);
68 m->m_session_timeout = ceph_decode_32(p);
69 m->m_session_autoclose = ceph_decode_32(p);
70 m->m_max_file_size = ceph_decode_64(p);
71 m->m_max_mds = ceph_decode_32(p);
72
73 m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
74 if (m->m_info == NULL)
75 goto badmem;
76
77 /* pick out active nodes from mds_info (state > 0) */
78 n = ceph_decode_32(p);
79 for (i = 0; i < n; i++) {
80 u64 global_id;
81 u32 namelen;
82 s32 mds, inc, state;
83 u64 state_seq;
84 u8 infoversion;
85 struct ceph_entity_addr addr;
86 u32 num_export_targets;
87 void *pexport_targets = NULL;
88
89 ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad);
90 global_id = ceph_decode_64(p);
91 infoversion = ceph_decode_8(p);
92 *p += sizeof(u64);
93 namelen = ceph_decode_32(p); /* skip mds name */
94 *p += namelen;
95
96 ceph_decode_need(p, end,
97 4*sizeof(u32) + sizeof(u64) +
98 sizeof(addr) + sizeof(struct ceph_timespec),
99 bad);
100 mds = ceph_decode_32(p);
101 inc = ceph_decode_32(p);
102 state = ceph_decode_32(p);
103 state_seq = ceph_decode_64(p);
104 ceph_decode_copy(p, &addr, sizeof(addr));
105 ceph_decode_addr(&addr);
106 *p += sizeof(struct ceph_timespec);
107 *p += sizeof(u32);
108 ceph_decode_32_safe(p, end, namelen, bad);
109 *p += namelen;
110 if (infoversion >= 2) {
111 ceph_decode_32_safe(p, end, num_export_targets, bad);
112 pexport_targets = *p;
113 *p += num_export_targets * sizeof(u32);
114 } else {
115 num_export_targets = 0;
116 }
117
118 dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
119 i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr),
120 ceph_mds_state_name(state));
121 if (mds >= 0 && mds < m->m_max_mds && state > 0) {
122 m->m_info[mds].global_id = global_id;
123 m->m_info[mds].state = state;
124 m->m_info[mds].addr = addr;
125 m->m_info[mds].num_export_targets = num_export_targets;
126 if (num_export_targets) {
127 m->m_info[mds].export_targets =
128 kcalloc(num_export_targets, sizeof(u32),
129 GFP_NOFS);
130 for (j = 0; j < num_export_targets; j++)
131 m->m_info[mds].export_targets[j] =
132 ceph_decode_32(&pexport_targets);
133 } else {
134 m->m_info[mds].export_targets = NULL;
135 }
136 }
137 }
138
139 /* pg_pools */
140 ceph_decode_32_safe(p, end, n, bad);
141 m->m_num_data_pg_pools = n;
142 m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
143 if (!m->m_data_pg_pools)
144 goto badmem;
145 ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
146 for (i = 0; i < n; i++)
147 m->m_data_pg_pools[i] = ceph_decode_32(p);
148 m->m_cas_pg_pool = ceph_decode_32(p);
149
150 /* ok, we don't care about the rest. */
151 dout("mdsmap_decode success epoch %u\n", m->m_epoch);
152 return m;
153
154badmem:
155 err = -ENOMEM;
156bad:
157 pr_err("corrupt mdsmap\n");
158 print_hex_dump(KERN_DEBUG, "mdsmap: ",
159 DUMP_PREFIX_OFFSET, 16, 1,
160 start, end - start, true);
161 ceph_mdsmap_destroy(m);
162 return ERR_PTR(-EINVAL);
163}
164
165void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
166{
167 int i;
168
169 for (i = 0; i < m->m_max_mds; i++)
170 kfree(m->m_info[i].export_targets);
171 kfree(m->m_info);
172 kfree(m->m_data_pg_pools);
173 kfree(m);
174}
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
new file mode 100644
index 000000000000..eacc131aa5cb
--- /dev/null
+++ b/fs/ceph/mdsmap.h
@@ -0,0 +1,54 @@
1#ifndef _FS_CEPH_MDSMAP_H
2#define _FS_CEPH_MDSMAP_H
3
4#include "types.h"
5
6/*
7 * mds map - describe servers in the mds cluster.
8 *
9 * we limit fields to those the client actually cares about
10 */
struct ceph_mds_info {
	u64 global_id;			/* cluster-wide unique mds id */
	struct ceph_entity_addr addr;	/* address to reach this mds */
	s32 state;			/* mds state; > 0 means "up" */
	int num_export_targets;		/* entries in export_targets[] */
	u32 *export_targets;		/* export target ranks (may be NULL) */
};
18
struct ceph_mdsmap {
	u32 m_epoch, m_client_epoch, m_last_failure;
	u32 m_root;		/* presumably the root-holding rank — TODO confirm */
	u32 m_session_timeout;          /* seconds */
	u32 m_session_autoclose;        /* seconds */
	u64 m_max_file_size;
	u32 m_max_mds;                  /* size of m_addr, m_state arrays */
	struct ceph_mds_info *m_info;   /* per-rank info, m_max_mds entries */

	/* which object pools file data can be stored in */
	int m_num_data_pg_pools;
	u32 *m_data_pg_pools;		/* m_num_data_pg_pools entries */
	u32 m_cas_pg_pool;
};
33
34static inline struct ceph_entity_addr *
35ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
36{
37 if (w >= m->m_max_mds)
38 return NULL;
39 return &m->m_info[w].addr;
40}
41
42static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
43{
44 BUG_ON(w < 0);
45 if (w >= m->m_max_mds)
46 return CEPH_MDS_STATE_DNE;
47 return m->m_info[w].state;
48}
49
50extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
51extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
52extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
53
54#endif
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
new file mode 100644
index 000000000000..781656a49bf8
--- /dev/null
+++ b/fs/ceph/messenger.c
@@ -0,0 +1,2240 @@
1#include "ceph_debug.h"
2
3#include <linux/crc32c.h>
4#include <linux/ctype.h>
5#include <linux/highmem.h>
6#include <linux/inet.h>
7#include <linux/kthread.h>
8#include <linux/net.h>
9#include <linux/socket.h>
10#include <linux/string.h>
11#include <net/tcp.h>
12
13#include "super.h"
14#include "messenger.h"
15#include "decode.h"
16#include "pagelist.h"
17
18/*
19 * Ceph uses the messenger to exchange ceph_msg messages with other
20 * hosts in the system. The messenger provides ordered and reliable
21 * delivery. We tolerate TCP disconnects by reconnecting (with
22 * exponential backoff) in the case of a fault (disconnection, bad
23 * crc, protocol error). Acks allow sent messages to be discarded by
24 * the sender.
25 */
26
27/* static tag bytes (protocol control messages) */
28static char tag_msg = CEPH_MSGR_TAG_MSG;
29static char tag_ack = CEPH_MSGR_TAG_ACK;
30static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
31
32
33static void queue_con(struct ceph_connection *con);
34static void con_work(struct work_struct *);
35static void ceph_fault(struct ceph_connection *con);
36
37const char *ceph_name_type_str(int t)
38{
39 switch (t) {
40 case CEPH_ENTITY_TYPE_MON: return "mon";
41 case CEPH_ENTITY_TYPE_MDS: return "mds";
42 case CEPH_ENTITY_TYPE_OSD: return "osd";
43 case CEPH_ENTITY_TYPE_CLIENT: return "client";
44 case CEPH_ENTITY_TYPE_ADMIN: return "admin";
45 default: return "???";
46 }
47}
48
49/*
50 * nicely render a sockaddr as a string.
51 */
#define MAX_ADDR_STR 20
/*
 * 60 bytes per slot: a full IPv6 address is 8*4 hex digits + 7 colons
 * = 39 chars, plus ":" and up to a 5-digit port and the NUL = 46.
 * The old 40-byte slots could be overflowed by pr_addr().
 */
#define MAX_ADDR_STR_LEN 60
static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
56
57const char *pr_addr(const struct sockaddr_storage *ss)
58{
59 int i;
60 char *s;
61 struct sockaddr_in *in4 = (void *)ss;
62 unsigned char *quad = (void *)&in4->sin_addr.s_addr;
63 struct sockaddr_in6 *in6 = (void *)ss;
64
65 spin_lock(&addr_str_lock);
66 i = last_addr_str++;
67 if (last_addr_str == MAX_ADDR_STR)
68 last_addr_str = 0;
69 spin_unlock(&addr_str_lock);
70 s = addr_str[i];
71
72 switch (ss->ss_family) {
73 case AF_INET:
74 sprintf(s, "%u.%u.%u.%u:%u",
75 (unsigned int)quad[0],
76 (unsigned int)quad[1],
77 (unsigned int)quad[2],
78 (unsigned int)quad[3],
79 (unsigned int)ntohs(in4->sin_port));
80 break;
81
82 case AF_INET6:
83 sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
84 in6->sin6_addr.s6_addr16[0],
85 in6->sin6_addr.s6_addr16[1],
86 in6->sin6_addr.s6_addr16[2],
87 in6->sin6_addr.s6_addr16[3],
88 in6->sin6_addr.s6_addr16[4],
89 in6->sin6_addr.s6_addr16[5],
90 in6->sin6_addr.s6_addr16[6],
91 in6->sin6_addr.s6_addr16[7],
92 (unsigned int)ntohs(in6->sin6_port));
93 break;
94
95 default:
96 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
97 }
98
99 return s;
100}
101
/*
 * Cache a wire-format copy of our own address (see ceph_encode_addr)
 * so the banner/connect paths can send it without re-encoding.
 */
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
107
108/*
109 * work queue for all reading and writing to/from the socket.
110 */
111struct workqueue_struct *ceph_msgr_wq;
112
113int __init ceph_msgr_init(void)
114{
115 ceph_msgr_wq = create_workqueue("ceph-msgr");
116 if (IS_ERR(ceph_msgr_wq)) {
117 int ret = PTR_ERR(ceph_msgr_wq);
118 pr_err("msgr_init failed to create workqueue: %d\n", ret);
119 ceph_msgr_wq = NULL;
120 return ret;
121 }
122 return 0;
123}
124
125void ceph_msgr_exit(void)
126{
127 destroy_workqueue(ceph_msgr_wq);
128}
129
130/*
131 * socket callback functions
132 */
133
/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;
	/* ignore wakeups once the peer has closed its side */
	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}
145
/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
163
/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	/* connection already torn down; nothing to report */
	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		/* report the fault once; the worker handles the rest */
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}
195
196/*
197 * set up socket callbacks
198 */
/*
 * Hook our callbacks into the socket and stash the connection in
 * sk_user_data so the callbacks can find it.
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}
208
209
210/*
211 * socket helpers
212 */
213
/*
 * initiate connection to a remote socket.
 *
 * Returns the new socket (also stored in con->sock), or an ERR_PTR.
 * The connect is non-blocking; completion is reported via
 * ceph_state_change().
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	con->sock = sock;
	/* socket allocations must not recurse into the filesystem */
	sock->sk->sk_allocation = GFP_NOFS;

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	/*
	 * NOTE(review): sizeof(*paddr) is sizeof(struct sockaddr),
	 * which is smaller than sockaddr_in6 — confirm whether IPv6
	 * peers are expected on this path.
	 */
	ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		/* expected for a non-blocking connect; not an error */
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->sock = NULL;
		con->error_msg = "connect error";
	}

	if (ret < 0)
		return ERR_PTR(ret);
	return sock;
}
253
/*
 * Non-blocking, signal-free receive of up to @len bytes into @buf.
 * Returns what kernel_recvmsg returns: bytes read, 0 on orderly
 * shutdown, or a negative errno (-EAGAIN when nothing is available).
 */
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}
261
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 *
 * Non-blocking; returns bytes written or a negative errno.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
		     size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
278
279
/*
 * Shutdown/close the socket for the given connection.
 * Returns the result of the shutdown call, or 0 if there was no socket.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	/*
	 * hold SOCK_CLOSED over the shutdown so ceph_state_change
	 * doesn't treat our own close as a peer fault
	 */
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}
297
/*
 * Unlink a message from whatever list it is on and drop the list's
 * reference.  (The "Reset a connection" description that used to sit
 * here belongs to reset_connection() below.)
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}
/* remove and drop every message on @head */
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}
315
/*
 * Reset a connection: discard all incoming and outgoing messages and
 * clear the *_seq state.  Caller holds con->mutex.
 */
static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	/* drop any partially received incoming message */
	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	/* drop the reference held for the in-flight outgoing message */
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
}
336
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	/* CLOSED is set before taking the mutex so queued work bails out */
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	/* queue one last pass so the worker observes CLOSED */
	queue_con(con);
}
354
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}
367
/*
 * generic get/put
 */
/*
 * Take a reference, unless the count has already hit zero (connection
 * being torn down).  Returns @con, or NULL if no ref was taken.
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	/* the two reads below are unsynchronized; debug output only */
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}
379
/* drop a reference; frees the connection when the last ref goes away */
void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);	/* must have been closed first */
		kfree(con);
	}
}
390
391/*
392 * initialize a new connection.
393 */
394void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
395{
396 dout("con_init %p\n", con);
397 memset(con, 0, sizeof(*con));
398 atomic_set(&con->nref, 1);
399 con->msgr = msgr;
400 mutex_init(&con->mutex);
401 INIT_LIST_HEAD(&con->out_queue);
402 INIT_LIST_HEAD(&con->out_sent);
403 INIT_DELAYED_WORK(&con->work, con_work);
404}
405
406
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	/* jump ahead if the peer has seen a higher seq from us */
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
422
423
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 * @v is the next free out_kvec slot.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;	/* nothing further for this message */
}
441
/*
 * Prepare headers for the next outgoing message.  Pulls the first
 * message off out_queue, queues tag + header + front + middle in
 * out_kvec, and computes the header/front/middle crcs.  Data pages,
 * if any, are sent separately by write_partial_msg_pages().
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
		       struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		/* lossy connections never resend; drop it from the queue */
		list_del_init(&m->list_head);
	} else {
		/* put message on sent list; keep the ref until it's acked */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	}

	m->hdr.seq = cpu_to_le64(++con->out_seq);

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				      sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;	/* filled in as pages go out */
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		con->out_msg_pos.page_pos =
			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}
533
/*
 * Prepare an ack for the sequence number we have received so far.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	/* out_temp_ack keeps the le64 alive until the write completes */
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}
554
555/*
556 * Prepare to write keepalive byte.
557 */
558static void prepare_write_keepalive(struct ceph_connection *con)
559{
560 dout("prepare_write_keepalive %p\n", con);
561 con->out_kvec[0].iov_base = &tag_keepalive;
562 con->out_kvec[0].iov_len = 1;
563 con->out_kvec_left = 1;
564 con->out_kvec_bytes = 1;
565 con->out_kvec_cur = con->out_kvec;
566 set_bit(WRITE_PENDING, &con->state);
567}
568
569/*
570 * Connection negotiation.
571 */
572
/*
 * Append the authorizer (if any) to the outgoing connect message.
 * Caller holds con->mutex.
 */
static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	/*
	 * con->mutex is dropped around the callback — presumably
	 * because get_authorizer may sleep or take other locks; confirm
	 * against the ops implementations.
	 */
	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	/* tack the authorizer payload onto the pending kvecs */
	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}
595
/*
 * We connected to a peer and are saying hello: queue our banner
 * string followed by our wire-encoded address.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}
614
/*
 * Queue the connect handshake message.  If @after_banner, append to
 * the already-queued banner kvecs; otherwise start a fresh kvec list.
 */
static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	/* pick the protocol for the entity type we are talking to */
	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	/*
	 * NOTE(review): every other field below is byte-swapped but
	 * features is assigned raw — confirm whether CEPH_FEATURE_SUPPORTED
	 * is already in wire order or a swap is missing here.
	 */
	con->out_connect.features = CEPH_FEATURE_SUPPORTED;
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}
660
661
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		/* account for the partial write: advance past fully-sent
		 * kvecs, then trim the partially-sent one */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
				break;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
703
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = con->msgr->nocrc;	/* note: nonzero means "do crc" here */
	int ret;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
		struct page *page = NULL;
		void *kaddr = NULL;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */
		if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		/* send at most to the end of this page / end of the data */
		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
			  (int)(data_len - con->out_msg_pos.data_pos));
		/* crc this page once, before the (possibly partial) sends */
		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}

		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos, len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc && (msg->pages || msg->pagelist))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			/* finished this page; advance to the next one */
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}
794
/*
 * write some zeros: used to pad out a message whose data pages were
 * revoked.  Sends from the shared zero page until out_skip is drained.
 *  1 -> done, 0 -> socket full, <0 -> error.
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
817
/*
 * Prepare to read connection handshake, or an ack.
 */
/* rewind the read position for the incoming banner */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}
826
/* rewind the read position for the incoming connect reply */
static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}
832
/*
 * re-read the connect reply only: position past the banner and the
 * two addresses we already consumed on the first attempt
 */
static void prepare_read_connect_retry(struct ceph_connection *con)
{
	dout("prepare_read_connect_retry %p\n", con);
	con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
		+ sizeof(con->peer_addr_for_me);
}
839
/* rewind the read position for an incoming ack */
static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}
845
/* expect the next incoming byte to be a protocol tag */
static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}
852
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);	/* previous message must be consumed */
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
864
865
/*
 * Read @size bytes into @object, resuming where a previous partial
 * read left off.  @to is a running offset: it is advanced by @size on
 * entry, and con->in_base_pos tracks how far we have read overall, so
 * repeated calls pick up mid-object.
 * Returns 1 when the object is complete, else what recvmsg returned.
 */
static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;	/* bytes of this object already read */
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
880
881
882/*
883 * Read all or part of the connect-side handshake on a new connection
884 */
885static int read_partial_banner(struct ceph_connection *con)
886{
887 int ret, to = 0;
888
889 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
890
891 /* peer's banner */
892 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
893 if (ret <= 0)
894 goto out;
895 ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
896 &con->actual_peer_addr);
897 if (ret <= 0)
898 goto out;
899 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
900 &con->peer_addr_for_me);
901 if (ret <= 0)
902 goto out;
903out:
904 return ret;
905}
906
/*
 * Read the connect reply: the fixed-size reply struct followed by the
 * variable-length authorizer reply.  Returns 1 when complete.
 */
static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;

}
929
930/*
931 * Verify the hello banner looks okay.
932 */
933static int verify_hello(struct ceph_connection *con)
934{
935 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
936 pr_err("connect to %s got bad banner\n",
937 pr_addr(&con->peer_addr.in_addr));
938 con->error_msg = "protocol error, bad banner";
939 return -1;
940 }
941 return 0;
942}
943
944static bool addr_is_blank(struct sockaddr_storage *ss)
945{
946 switch (ss->ss_family) {
947 case AF_INET:
948 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
949 case AF_INET6:
950 return
951 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
952 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
953 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
954 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
955 }
956 return false;
957}
958
/*
 * Return the port of an IPv4/IPv6 address in host byte order, or 0
 * for an unrecognized address family.
 */
static int addr_port(struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	if (ss->ss_family == AF_INET6)
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	return 0;
}
969
/*
 * Store port @p (host byte order) into an IPv4/IPv6 address.
 * Unknown families are left untouched.
 *
 * Fix: the switch was missing break statements, so the AF_INET case
 * fell through and also wrote sin6_port through the sockaddr_in6
 * view of the storage.
 */
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
979
980/*
981 * Parse an ip[:port] list into an addr array. Use the default
982 * monitor port if a port isn't specified.
983 */
984int ceph_parse_ips(const char *c, const char *end,
985 struct ceph_entity_addr *addr,
986 int max_count, int *count)
987{
988 int i;
989 const char *p = c;
990
991 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
992 for (i = 0; i < max_count; i++) {
993 const char *ipend;
994 struct sockaddr_storage *ss = &addr[i].in_addr;
995 struct sockaddr_in *in4 = (void *)ss;
996 struct sockaddr_in6 *in6 = (void *)ss;
997 int port;
998
999 memset(ss, 0, sizeof(*ss));
1000 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
1001 ',', &ipend)) {
1002 ss->ss_family = AF_INET;
1003 } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1004 ',', &ipend)) {
1005 ss->ss_family = AF_INET6;
1006 } else {
1007 goto bad;
1008 }
1009 p = ipend;
1010
1011 /* port? */
1012 if (p < end && *p == ':') {
1013 port = 0;
1014 p++;
1015 while (p < end && *p >= '0' && *p <= '9') {
1016 port = (port * 10) + (*p - '0');
1017 p++;
1018 }
1019 if (port > 65535 || port == 0)
1020 goto bad;
1021 } else {
1022 port = CEPH_MON_PORT;
1023 }
1024
1025 addr_set_port(ss, port);
1026
1027 dout("parse_ips got %s\n", pr_addr(ss));
1028
1029 if (p == end)
1030 break;
1031 if (*p != ',')
1032 goto bad;
1033 p++;
1034 }
1035
1036 if (p != end)
1037 goto bad;
1038
1039 if (count)
1040 *count = i + 1;
1041 return 0;
1042
1043bad:
1044 pr_err("parse_ips bad ip '%s'\n", c);
1045 return -EINVAL;
1046}
1047
/*
 * Process a fully received handshake banner: verify the magic string,
 * check that the peer is who we intended to reach, and learn our own
 * externally visible address if we did not know it yet.
 *
 * Returns 0 and advances to the NEGOTIATING phase, or -1 on error
 * (con->error_msg set).  Caller holds con->mutex.
 */
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	/* addresses arrive in wire format; convert in place */
	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
			   pr_addr(&con->peer_addr.in_addr),
			   le64_to_cpu(con->peer_addr.nonce),
			   pr_addr(&con->actual_peer_addr.in_addr),
			   le64_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		/* adopt the address the peer sees us as, keeping our port */
		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	/* banner done; move on to the connect negotiation phase */
	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
1095
/*
 * Shut the connection down after an unrecoverable protocol-level
 * failure (e.g. feature or version mismatch).  Caller holds
 * con->mutex; it is dropped around the bad_proto callback so the
 * upper layer can run without the connection lock held.
 */
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}
1106
/*
 * Handle the peer's reply (con->in_reply) to our connect request.
 * Depending on the reply tag we either fail the connection, retry the
 * negotiation with updated sequence numbers, or complete the
 * handshake and become READY.
 *
 * Returns 0 to continue, -1 on a fatal error (con->error_msg set).
 * Caller holds con->mutex; it may be dropped temporarily for upper
 * layer callbacks (peer_reset, bad_proto).
 */
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
	u64 req_feat = CEPH_FEATURE_REQUIRED;
	u64 server_feat = le64_to_cpu(con->in_reply.features);

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		/* server requires features we do not support */
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		/* give up after the second failed attempt */
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		/*
		 * NOTE(review): this assignment looks redundant -- we only
		 * get here when auth_retry was just incremented to 1 --
		 * confirm intent.
		 */
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect_retry(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		/* handshake accepted; double-check required features */
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");
		/* fall through to the generic error path */

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
1247
1248
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	/* accumulates the ack seq into in_temp_ack across calls */
	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}
1259
1260
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	/*
	 * Drop everything with seq <= ack from the front of out_sent;
	 * assumes out_sent is kept in ascending seq order -- the first
	 * entry with seq > ack ends the scan.
	 */
	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	/* go back to waiting for the next protocol tag */
	prepare_read_tag(con);
}
1282
1283
1284
1285
1286static int read_partial_message_section(struct ceph_connection *con,
1287 struct kvec *section, unsigned int sec_len,
1288 u32 *crc)
1289{
1290 int left;
1291 int ret;
1292
1293 BUG_ON(!section);
1294
1295 while (section->iov_len < sec_len) {
1296 BUG_ON(section->iov_base == NULL);
1297 left = sec_len - section->iov_len;
1298 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1299 section->iov_len, left);
1300 if (ret <= 0)
1301 return ret;
1302 section->iov_len += ret;
1303 if (section->iov_len == sec_len)
1304 *crc = crc32c(0, section->iov_base,
1305 section->iov_len);
1306 }
1307
1308 return 1;
1309}
1310
1311static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1312 struct ceph_msg_header *hdr,
1313 int *skip);
1314/*
1315 * read (part of) a message.
1316 */
1317static int read_partial_message(struct ceph_connection *con)
1318{
1319 struct ceph_msg *m = con->in_msg;
1320 void *p;
1321 int ret;
1322 int to, left;
1323 unsigned front_len, middle_len, data_len, data_off;
1324 int datacrc = con->msgr->nocrc;
1325 int skip;
1326
1327 dout("read_partial_message con %p msg %p\n", con, m);
1328
1329 /* header */
1330 while (con->in_base_pos < sizeof(con->in_hdr)) {
1331 left = sizeof(con->in_hdr) - con->in_base_pos;
1332 ret = ceph_tcp_recvmsg(con->sock,
1333 (char *)&con->in_hdr + con->in_base_pos,
1334 left);
1335 if (ret <= 0)
1336 return ret;
1337 con->in_base_pos += ret;
1338 if (con->in_base_pos == sizeof(con->in_hdr)) {
1339 u32 crc = crc32c(0, (void *)&con->in_hdr,
1340 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
1341 if (crc != le32_to_cpu(con->in_hdr.crc)) {
1342 pr_err("read_partial_message bad hdr "
1343 " crc %u != expected %u\n",
1344 crc, con->in_hdr.crc);
1345 return -EBADMSG;
1346 }
1347 }
1348 }
1349 front_len = le32_to_cpu(con->in_hdr.front_len);
1350 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1351 return -EIO;
1352 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1353 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1354 return -EIO;
1355 data_len = le32_to_cpu(con->in_hdr.data_len);
1356 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1357 return -EIO;
1358 data_off = le16_to_cpu(con->in_hdr.data_off);
1359
1360 /* allocate message? */
1361 if (!con->in_msg) {
1362 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1363 con->in_hdr.front_len, con->in_hdr.data_len);
1364 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1365 if (skip) {
1366 /* skip this message */
1367 dout("alloc_msg returned NULL, skipping message\n");
1368 con->in_base_pos = -front_len - middle_len - data_len -
1369 sizeof(m->footer);
1370 con->in_tag = CEPH_MSGR_TAG_READY;
1371 return 0;
1372 }
1373 if (IS_ERR(con->in_msg)) {
1374 ret = PTR_ERR(con->in_msg);
1375 con->in_msg = NULL;
1376 con->error_msg =
1377 "error allocating memory for incoming message";
1378 return ret;
1379 }
1380 m = con->in_msg;
1381 m->front.iov_len = 0; /* haven't read it yet */
1382 if (m->middle)
1383 m->middle->vec.iov_len = 0;
1384
1385 con->in_msg_pos.page = 0;
1386 con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
1387 con->in_msg_pos.data_pos = 0;
1388 }
1389
1390 /* front */
1391 ret = read_partial_message_section(con, &m->front, front_len,
1392 &con->in_front_crc);
1393 if (ret <= 0)
1394 return ret;
1395
1396 /* middle */
1397 if (m->middle) {
1398 ret = read_partial_message_section(con, &m->middle->vec, middle_len,
1399 &con->in_middle_crc);
1400 if (ret <= 0)
1401 return ret;
1402 }
1403
1404 /* (page) data */
1405 while (con->in_msg_pos.data_pos < data_len) {
1406 left = min((int)(data_len - con->in_msg_pos.data_pos),
1407 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1408 BUG_ON(m->pages == NULL);
1409 p = kmap(m->pages[con->in_msg_pos.page]);
1410 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1411 left);
1412 if (ret > 0 && datacrc)
1413 con->in_data_crc =
1414 crc32c(con->in_data_crc,
1415 p + con->in_msg_pos.page_pos, ret);
1416 kunmap(m->pages[con->in_msg_pos.page]);
1417 if (ret <= 0)
1418 return ret;
1419 con->in_msg_pos.data_pos += ret;
1420 con->in_msg_pos.page_pos += ret;
1421 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1422 con->in_msg_pos.page_pos = 0;
1423 con->in_msg_pos.page++;
1424 }
1425 }
1426
1427 /* footer */
1428 to = sizeof(m->hdr) + sizeof(m->footer);
1429 while (con->in_base_pos < to) {
1430 left = to - con->in_base_pos;
1431 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
1432 (con->in_base_pos - sizeof(m->hdr)),
1433 left);
1434 if (ret <= 0)
1435 return ret;
1436 con->in_base_pos += ret;
1437 }
1438 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1439 m, front_len, m->footer.front_crc, middle_len,
1440 m->footer.middle_crc, data_len, m->footer.data_crc);
1441
1442 /* crc ok? */
1443 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1444 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1445 m, con->in_front_crc, m->footer.front_crc);
1446 return -EBADMSG;
1447 }
1448 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1449 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1450 m, con->in_middle_crc, m->footer.middle_crc);
1451 return -EBADMSG;
1452 }
1453 if (datacrc &&
1454 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1455 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1456 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1457 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1458 return -EBADMSG;
1459 }
1460
1461 return 1; /* done! */
1462}
1463
1464/*
1465 * Process message. This happens in the worker thread. The callback should
1466 * be careful not to do anything that waits on other incoming messages or it
1467 * may deadlock.
1468 */
1469static void process_message(struct ceph_connection *con)
1470{
1471 struct ceph_msg *msg;
1472
1473 msg = con->in_msg;
1474 con->in_msg = NULL;
1475
1476 /* if first message, set peer_name */
1477 if (con->peer_name.type == 0)
1478 con->peer_name = msg->hdr.src.name;
1479
1480 con->in_seq++;
1481 mutex_unlock(&con->mutex);
1482
1483 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1484 msg, le64_to_cpu(msg->hdr.seq),
1485 ENTITY_NAME(msg->hdr.src.name),
1486 le16_to_cpu(msg->hdr.type),
1487 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1488 le32_to_cpu(msg->hdr.front_len),
1489 le32_to_cpu(msg->hdr.data_len),
1490 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1491 con->ops->dispatch(con, msg);
1492
1493 mutex_lock(&con->mutex);
1494 prepare_read_tag(con);
1495}
1496
1497
1498/*
1499 * Write something to the socket. Called in a worker thread when the
1500 * socket appears to be writeable and we have something ready to send.
1501 */
1502static int try_write(struct ceph_connection *con)
1503{
1504 struct ceph_messenger *msgr = con->msgr;
1505 int ret = 1;
1506
1507 dout("try_write start %p state %lu nref %d\n", con, con->state,
1508 atomic_read(&con->nref));
1509
1510 mutex_lock(&con->mutex);
1511more:
1512 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1513
1514 /* open the socket first? */
1515 if (con->sock == NULL) {
1516 /*
1517 * if we were STANDBY and are reconnecting _this_
1518 * connection, bump connect_seq now. Always bump
1519 * global_seq.
1520 */
1521 if (test_and_clear_bit(STANDBY, &con->state))
1522 con->connect_seq++;
1523
1524 prepare_write_banner(msgr, con);
1525 prepare_write_connect(msgr, con, 1);
1526 prepare_read_banner(con);
1527 set_bit(CONNECTING, &con->state);
1528 clear_bit(NEGOTIATING, &con->state);
1529
1530 BUG_ON(con->in_msg);
1531 con->in_tag = CEPH_MSGR_TAG_READY;
1532 dout("try_write initiating connect on %p new state %lu\n",
1533 con, con->state);
1534 con->sock = ceph_tcp_connect(con);
1535 if (IS_ERR(con->sock)) {
1536 con->sock = NULL;
1537 con->error_msg = "connect error";
1538 ret = -1;
1539 goto out;
1540 }
1541 }
1542
1543more_kvec:
1544 /* kvec data queued? */
1545 if (con->out_skip) {
1546 ret = write_partial_skip(con);
1547 if (ret <= 0)
1548 goto done;
1549 if (ret < 0) {
1550 dout("try_write write_partial_skip err %d\n", ret);
1551 goto done;
1552 }
1553 }
1554 if (con->out_kvec_left) {
1555 ret = write_partial_kvec(con);
1556 if (ret <= 0)
1557 goto done;
1558 }
1559
1560 /* msg pages? */
1561 if (con->out_msg) {
1562 if (con->out_msg_done) {
1563 ceph_msg_put(con->out_msg);
1564 con->out_msg = NULL; /* we're done with this one */
1565 goto do_next;
1566 }
1567
1568 ret = write_partial_msg_pages(con);
1569 if (ret == 1)
1570 goto more_kvec; /* we need to send the footer, too! */
1571 if (ret == 0)
1572 goto done;
1573 if (ret < 0) {
1574 dout("try_write write_partial_msg_pages err %d\n",
1575 ret);
1576 goto done;
1577 }
1578 }
1579
1580do_next:
1581 if (!test_bit(CONNECTING, &con->state)) {
1582 /* is anything else pending? */
1583 if (!list_empty(&con->out_queue)) {
1584 prepare_write_message(con);
1585 goto more;
1586 }
1587 if (con->in_seq > con->in_seq_acked) {
1588 prepare_write_ack(con);
1589 goto more;
1590 }
1591 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
1592 prepare_write_keepalive(con);
1593 goto more;
1594 }
1595 }
1596
1597 /* Nothing to do! */
1598 clear_bit(WRITE_PENDING, &con->state);
1599 dout("try_write nothing else to write.\n");
1600done:
1601 ret = 0;
1602out:
1603 mutex_unlock(&con->mutex);
1604 dout("try_write done on %p\n", con);
1605 return ret;
1606}
1607
1608
1609
1610/*
1611 * Read what we can from the socket.
1612 */
1613static int try_read(struct ceph_connection *con)
1614{
1615 struct ceph_messenger *msgr;
1616 int ret = -1;
1617
1618 if (!con->sock)
1619 return 0;
1620
1621 if (test_bit(STANDBY, &con->state))
1622 return 0;
1623
1624 dout("try_read start on %p\n", con);
1625 msgr = con->msgr;
1626
1627 mutex_lock(&con->mutex);
1628
1629more:
1630 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1631 con->in_base_pos);
1632 if (test_bit(CONNECTING, &con->state)) {
1633 if (!test_bit(NEGOTIATING, &con->state)) {
1634 dout("try_read connecting\n");
1635 ret = read_partial_banner(con);
1636 if (ret <= 0)
1637 goto done;
1638 if (process_banner(con) < 0) {
1639 ret = -1;
1640 goto out;
1641 }
1642 }
1643 ret = read_partial_connect(con);
1644 if (ret <= 0)
1645 goto done;
1646 if (process_connect(con) < 0) {
1647 ret = -1;
1648 goto out;
1649 }
1650 goto more;
1651 }
1652
1653 if (con->in_base_pos < 0) {
1654 /*
1655 * skipping + discarding content.
1656 *
1657 * FIXME: there must be a better way to do this!
1658 */
1659 static char buf[1024];
1660 int skip = min(1024, -con->in_base_pos);
1661 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1662 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1663 if (ret <= 0)
1664 goto done;
1665 con->in_base_pos += ret;
1666 if (con->in_base_pos)
1667 goto more;
1668 }
1669 if (con->in_tag == CEPH_MSGR_TAG_READY) {
1670 /*
1671 * what's next?
1672 */
1673 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1674 if (ret <= 0)
1675 goto done;
1676 dout("try_read got tag %d\n", (int)con->in_tag);
1677 switch (con->in_tag) {
1678 case CEPH_MSGR_TAG_MSG:
1679 prepare_read_message(con);
1680 break;
1681 case CEPH_MSGR_TAG_ACK:
1682 prepare_read_ack(con);
1683 break;
1684 case CEPH_MSGR_TAG_CLOSE:
1685 set_bit(CLOSED, &con->state); /* fixme */
1686 goto done;
1687 default:
1688 goto bad_tag;
1689 }
1690 }
1691 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
1692 ret = read_partial_message(con);
1693 if (ret <= 0) {
1694 switch (ret) {
1695 case -EBADMSG:
1696 con->error_msg = "bad crc";
1697 ret = -EIO;
1698 goto out;
1699 case -EIO:
1700 con->error_msg = "io error";
1701 goto out;
1702 default:
1703 goto done;
1704 }
1705 }
1706 if (con->in_tag == CEPH_MSGR_TAG_READY)
1707 goto more;
1708 process_message(con);
1709 goto more;
1710 }
1711 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1712 ret = read_partial_ack(con);
1713 if (ret <= 0)
1714 goto done;
1715 process_ack(con);
1716 goto more;
1717 }
1718
1719done:
1720 ret = 0;
1721out:
1722 mutex_unlock(&con->mutex);
1723 dout("try_read done on %p\n", con);
1724 return ret;
1725
1726bad_tag:
1727 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
1728 con->error_msg = "protocol error, garbage tag";
1729 ret = -1;
1730 goto out;
1731}
1732
1733
1734/*
1735 * Atomically queue work on a connection. Bump @con reference to
1736 * avoid races with connection teardown.
1737 *
1738 * There is some trickery going on with QUEUED and BUSY because we
1739 * only want a _single_ thread operating on each connection at any
1740 * point in time, but we want to use all available CPUs.
1741 *
1742 * The worker thread only proceeds if it can atomically set BUSY. It
1743 * clears QUEUED and does it's thing. When it thinks it's done, it
1744 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
1745 * (tries again to set BUSY).
1746 *
1747 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
1748 * try to queue work. If that fails (work is already queued, or BUSY)
1749 * we give up (work also already being done or is queued) but leave QUEUED
1750 * set so that the worker thread will loop if necessary.
1751 */
1752static void queue_con(struct ceph_connection *con)
1753{
1754 if (test_bit(DEAD, &con->state)) {
1755 dout("queue_con %p ignoring: DEAD\n",
1756 con);
1757 return;
1758 }
1759
1760 if (!con->ops->get(con)) {
1761 dout("queue_con %p ref count 0\n", con);
1762 return;
1763 }
1764
1765 set_bit(QUEUED, &con->state);
1766 if (test_bit(BUSY, &con->state)) {
1767 dout("queue_con %p - already BUSY\n", con);
1768 con->ops->put(con);
1769 } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
1770 dout("queue_con %p - already queued\n", con);
1771 con->ops->put(con);
1772 } else {
1773 dout("queue_con %p\n", con);
1774 }
1775}
1776
1777/*
1778 * Do some work on a connection. Drop a connection ref when we're done.
1779 */
1780static void con_work(struct work_struct *work)
1781{
1782 struct ceph_connection *con = container_of(work, struct ceph_connection,
1783 work.work);
1784 int backoff = 0;
1785
1786more:
1787 if (test_and_set_bit(BUSY, &con->state) != 0) {
1788 dout("con_work %p BUSY already set\n", con);
1789 goto out;
1790 }
1791 dout("con_work %p start, clearing QUEUED\n", con);
1792 clear_bit(QUEUED, &con->state);
1793
1794 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1795 dout("con_work CLOSED\n");
1796 con_close_socket(con);
1797 goto done;
1798 }
1799 if (test_and_clear_bit(OPENING, &con->state)) {
1800 /* reopen w/ new peer */
1801 dout("con_work OPENING\n");
1802 con_close_socket(con);
1803 }
1804
1805 if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
1806 try_read(con) < 0 ||
1807 try_write(con) < 0) {
1808 backoff = 1;
1809 ceph_fault(con); /* error/fault path */
1810 }
1811
1812done:
1813 clear_bit(BUSY, &con->state);
1814 dout("con->state=%lu\n", con->state);
1815 if (test_bit(QUEUED, &con->state)) {
1816 if (!backoff || test_bit(OPENING, &con->state)) {
1817 dout("con_work %p QUEUED reset, looping\n", con);
1818 goto more;
1819 }
1820 dout("con_work %p QUEUED reset, but just faulted\n", con);
1821 clear_bit(QUEUED, &con->state);
1822 }
1823 dout("con_work %p done\n", con);
1824
1825out:
1826 con->ops->put(con);
1827}
1828
1829
1830/*
1831 * Generic error/fault handler. A retry mechanism is used with
1832 * exponential backoff
1833 */
1834static void ceph_fault(struct ceph_connection *con)
1835{
1836 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1837 pr_addr(&con->peer_addr.in_addr), con->error_msg);
1838 dout("fault %p state %lu to peer %s\n",
1839 con, con->state, pr_addr(&con->peer_addr.in_addr));
1840
1841 if (test_bit(LOSSYTX, &con->state)) {
1842 dout("fault on LOSSYTX channel\n");
1843 goto out;
1844 }
1845
1846 clear_bit(BUSY, &con->state); /* to avoid an improbable race */
1847
1848 mutex_lock(&con->mutex);
1849 if (test_bit(CLOSED, &con->state))
1850 goto out_unlock;
1851
1852 con_close_socket(con);
1853
1854 if (con->in_msg) {
1855 ceph_msg_put(con->in_msg);
1856 con->in_msg = NULL;
1857 }
1858
1859 /* Requeue anything that hasn't been acked */
1860 list_splice_init(&con->out_sent, &con->out_queue);
1861
1862 /* If there are no messages in the queue, place the connection
1863 * in a STANDBY state (i.e., don't try to reconnect just yet). */
1864 if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
1865 dout("fault setting STANDBY\n");
1866 set_bit(STANDBY, &con->state);
1867 } else {
1868 /* retry after a delay. */
1869 if (con->delay == 0)
1870 con->delay = BASE_DELAY_INTERVAL;
1871 else if (con->delay < MAX_DELAY_INTERVAL)
1872 con->delay *= 2;
1873 dout("fault queueing %p delay %lu\n", con, con->delay);
1874 con->ops->get(con);
1875 if (queue_delayed_work(ceph_msgr_wq, &con->work,
1876 round_jiffies_relative(con->delay)) == 0)
1877 con->ops->put(con);
1878 }
1879
1880out_unlock:
1881 mutex_unlock(&con->mutex);
1882out:
1883 /*
1884 * in case we faulted due to authentication, invalidate our
1885 * current tickets so that we can get new ones.
1886 */
1887 if (con->auth_retry && con->ops->invalidate_authorizer) {
1888 dout("calling invalidate_authorizer()\n");
1889 con->ops->invalidate_authorizer(con);
1890 }
1891
1892 if (con->ops->fault)
1893 con->ops->fault(con);
1894}
1895
1896
1897
1898/*
1899 * create a new messenger instance
1900 */
1901struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
1902{
1903 struct ceph_messenger *msgr;
1904
1905 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
1906 if (msgr == NULL)
1907 return ERR_PTR(-ENOMEM);
1908
1909 spin_lock_init(&msgr->global_seq_lock);
1910
1911 /* the zero page is needed if a request is "canceled" while the message
1912 * is being written over the socket */
1913 msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1914 if (!msgr->zero_page) {
1915 kfree(msgr);
1916 return ERR_PTR(-ENOMEM);
1917 }
1918 kmap(msgr->zero_page);
1919
1920 if (myaddr)
1921 msgr->inst.addr = *myaddr;
1922
1923 /* select a random nonce */
1924 msgr->inst.addr.type = 0;
1925 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
1926 encode_my_addr(msgr);
1927
1928 dout("messenger_create %p\n", msgr);
1929 return msgr;
1930}
1931
/*
 * Tear down a messenger: release the permanently mapped zero page and
 * free the structure itself.
 */
void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);	/* mapped at create time */
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
1940
1941/*
1942 * Queue up an outgoing message on the given connection.
1943 */
1944void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1945{
1946 if (test_bit(CLOSED, &con->state)) {
1947 dout("con_send %p closed, dropping %p\n", con, msg);
1948 ceph_msg_put(msg);
1949 return;
1950 }
1951
1952 /* set src+dst */
1953 msg->hdr.src.name = con->msgr->inst.name;
1954 msg->hdr.src.addr = con->msgr->my_enc_addr;
1955 msg->hdr.orig_src = msg->hdr.src;
1956
1957 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1958
1959 /* queue */
1960 mutex_lock(&con->mutex);
1961 BUG_ON(!list_empty(&msg->list_head));
1962 list_add_tail(&msg->list_head, &con->out_queue);
1963 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1964 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1965 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1966 le32_to_cpu(msg->hdr.front_len),
1967 le32_to_cpu(msg->hdr.middle_len),
1968 le32_to_cpu(msg->hdr.data_len));
1969 mutex_unlock(&con->mutex);
1970
1971 /* if there wasn't anything waiting to send before, queue
1972 * new work */
1973 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
1974 queue_con(con);
1975}
1976
1977/*
1978 * Revoke a message that was previously queued for send
1979 */
1980void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
1981{
1982 mutex_lock(&con->mutex);
1983 if (!list_empty(&msg->list_head)) {
1984 dout("con_revoke %p msg %p\n", con, msg);
1985 list_del_init(&msg->list_head);
1986 ceph_msg_put(msg);
1987 msg->hdr.seq = 0;
1988 if (con->out_msg == msg) {
1989 ceph_msg_put(con->out_msg);
1990 con->out_msg = NULL;
1991 }
1992 if (con->out_kvec_is_msg) {
1993 con->out_skip = con->out_kvec_bytes;
1994 con->out_kvec_is_msg = false;
1995 }
1996 } else {
1997 dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
1998 }
1999 mutex_unlock(&con->mutex);
2000}
2001
2002/*
2003 * Revoke a message that we may be reading data into
2004 */
2005void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2006{
2007 mutex_lock(&con->mutex);
2008 if (con->in_msg && con->in_msg == msg) {
2009 unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
2010 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
2011 unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
2012
2013 /* skip rest of message */
2014 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
2015 con->in_base_pos = con->in_base_pos -
2016 sizeof(struct ceph_msg_header) -
2017 front_len -
2018 middle_len -
2019 data_len -
2020 sizeof(struct ceph_msg_footer);
2021 ceph_msg_put(con->in_msg);
2022 con->in_msg = NULL;
2023 con->in_tag = CEPH_MSGR_TAG_READY;
2024 } else {
2025 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2026 con, con->in_msg, msg);
2027 }
2028 mutex_unlock(&con->mutex);
2029}
2030
2031/*
2032 * Queue a keepalive byte to ensure the tcp connection is alive.
2033 */
2034void ceph_con_keepalive(struct ceph_connection *con)
2035{
2036 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2037 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2038 queue_con(con);
2039}
2040
2041
2042/*
2043 * construct a new message with given type, size
2044 * the new msg has a ref count of 1.
2045 */
2046struct ceph_msg *ceph_msg_new(int type, int front_len,
2047 int page_len, int page_off, struct page **pages)
2048{
2049 struct ceph_msg *m;
2050
2051 m = kmalloc(sizeof(*m), GFP_NOFS);
2052 if (m == NULL)
2053 goto out;
2054 kref_init(&m->kref);
2055 INIT_LIST_HEAD(&m->list_head);
2056
2057 m->hdr.type = cpu_to_le16(type);
2058 m->hdr.front_len = cpu_to_le32(front_len);
2059 m->hdr.middle_len = 0;
2060 m->hdr.data_len = cpu_to_le32(page_len);
2061 m->hdr.data_off = cpu_to_le16(page_off);
2062 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
2063 m->footer.front_crc = 0;
2064 m->footer.middle_crc = 0;
2065 m->footer.data_crc = 0;
2066 m->front_max = front_len;
2067 m->front_is_vmalloc = false;
2068 m->more_to_follow = false;
2069 m->pool = NULL;
2070
2071 /* front */
2072 if (front_len) {
2073 if (front_len > PAGE_CACHE_SIZE) {
2074 m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
2075 PAGE_KERNEL);
2076 m->front_is_vmalloc = true;
2077 } else {
2078 m->front.iov_base = kmalloc(front_len, GFP_NOFS);
2079 }
2080 if (m->front.iov_base == NULL) {
2081 pr_err("msg_new can't allocate %d bytes\n",
2082 front_len);
2083 goto out2;
2084 }
2085 } else {
2086 m->front.iov_base = NULL;
2087 }
2088 m->front.iov_len = front_len;
2089
2090 /* middle */
2091 m->middle = NULL;
2092
2093 /* data */
2094 m->nr_pages = calc_pages_for(page_off, page_len);
2095 m->pages = pages;
2096 m->pagelist = NULL;
2097
2098 dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
2099 m->nr_pages);
2100 return m;
2101
2102out2:
2103 ceph_msg_put(m);
2104out:
2105 pr_err("msg_new can't create type %d len %d\n", type, front_len);
2106 return ERR_PTR(-ENOMEM);
2107}
2108
2109/*
2110 * Allocate "middle" portion of a message, if it is needed and wasn't
2111 * allocated by alloc_msg. This allows us to read a small fixed-size
2112 * per-type header in the front and then gracefully fail (i.e.,
2113 * propagate the error to the caller based on info in the front) when
2114 * the middle is too large.
2115 */
2116static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2117{
2118 int type = le16_to_cpu(msg->hdr.type);
2119 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2120
2121 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2122 ceph_msg_type_name(type), middle_len);
2123 BUG_ON(!middle_len);
2124 BUG_ON(msg->middle);
2125
2126 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2127 if (!msg->middle)
2128 return -ENOMEM;
2129 return 0;
2130}
2131
2132/*
2133 * Generic message allocator, for incoming messages.
2134 */
2135static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2136 struct ceph_msg_header *hdr,
2137 int *skip)
2138{
2139 int type = le16_to_cpu(hdr->type);
2140 int front_len = le32_to_cpu(hdr->front_len);
2141 int middle_len = le32_to_cpu(hdr->middle_len);
2142 struct ceph_msg *msg = NULL;
2143 int ret;
2144
2145 if (con->ops->alloc_msg) {
2146 mutex_unlock(&con->mutex);
2147 msg = con->ops->alloc_msg(con, hdr, skip);
2148 mutex_lock(&con->mutex);
2149 if (IS_ERR(msg))
2150 return msg;
2151
2152 if (*skip)
2153 return NULL;
2154 }
2155 if (!msg) {
2156 *skip = 0;
2157 msg = ceph_msg_new(type, front_len, 0, 0, NULL);
2158 if (!msg) {
2159 pr_err("unable to allocate msg type %d len %d\n",
2160 type, front_len);
2161 return ERR_PTR(-ENOMEM);
2162 }
2163 }
2164 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2165
2166 if (middle_len) {
2167 ret = ceph_alloc_middle(con, msg);
2168
2169 if (ret < 0) {
2170 ceph_msg_put(msg);
2171 return msg;
2172 }
2173 }
2174
2175 return msg;
2176}
2177
2178
2179/*
2180 * Free a generically kmalloc'd message.
2181 */
2182void ceph_msg_kfree(struct ceph_msg *m)
2183{
2184 dout("msg_kfree %p\n", m);
2185 if (m->front_is_vmalloc)
2186 vfree(m->front.iov_base);
2187 else
2188 kfree(m->front.iov_base);
2189 kfree(m);
2190}
2191
2192/*
2193 * Drop a msg ref. Destroy as needed.
2194 */
2195void ceph_msg_last_put(struct kref *kref)
2196{
2197 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2198
2199 dout("ceph_msg_put last one on %p\n", m);
2200 WARN_ON(!list_empty(&m->list_head));
2201
2202 /* drop middle, data, if any */
2203 if (m->middle) {
2204 ceph_buffer_put(m->middle);
2205 m->middle = NULL;
2206 }
2207 m->nr_pages = 0;
2208 m->pages = NULL;
2209
2210 if (m->pagelist) {
2211 ceph_pagelist_release(m->pagelist);
2212 kfree(m->pagelist);
2213 m->pagelist = NULL;
2214 }
2215
2216 if (m->pool)
2217 ceph_msgpool_put(m->pool, m);
2218 else
2219 ceph_msg_kfree(m);
2220}
2221
/*
 * Hex-dump a message's header, front, middle (if present), and footer
 * to the kernel log, for debugging.
 */
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
new file mode 100644
index 000000000000..4caaa5911110
--- /dev/null
+++ b/fs/ceph/messenger.h
@@ -0,0 +1,254 @@
1#ifndef __FS_CEPH_MESSENGER_H
2#define __FS_CEPH_MESSENGER_H
3
4#include <linux/kref.h>
5#include <linux/mutex.h>
6#include <linux/net.h>
7#include <linux/radix-tree.h>
8#include <linux/uio.h>
9#include <linux/version.h>
10#include <linux/workqueue.h>
11
12#include "types.h"
13#include "buffer.h"
14
15struct ceph_msg;
16struct ceph_connection;
17
18extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */
19
20/*
21 * Ceph defines these callbacks for handling connection events.
22 */
23struct ceph_connection_operations {
24 struct ceph_connection *(*get)(struct ceph_connection *);
25 void (*put)(struct ceph_connection *);
26
27 /* handle an incoming message. */
28 void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
29
30 /* authorize an outgoing connection */
31 int (*get_authorizer) (struct ceph_connection *con,
32 void **buf, int *len, int *proto,
33 void **reply_buf, int *reply_len, int force_new);
34 int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
35 int (*invalidate_authorizer)(struct ceph_connection *con);
36
37 /* protocol version mismatch */
38 void (*bad_proto) (struct ceph_connection *con);
39
40 /* there was some error on the socket (disconnect, whatever) */
41 void (*fault) (struct ceph_connection *con);
42
43 /* a remote host as terminated a message exchange session, and messages
44 * we sent (or they tried to send us) may be lost. */
45 void (*peer_reset) (struct ceph_connection *con);
46
47 struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
48 struct ceph_msg_header *hdr,
49 int *skip);
50};
51
52extern const char *ceph_name_type_str(int t);
53
54/* use format string %s%d */
55#define ENTITY_NAME(n) ceph_name_type_str((n).type), le64_to_cpu((n).num)
56
/* per-client messenger instance: identity plus shared messenger state */
struct ceph_messenger {
	struct ceph_entity_inst inst;    /* my name+address */
	struct ceph_entity_addr my_enc_addr;
	struct page *zero_page;          /* used in certain error cases */

	bool nocrc;                      /* skip data crc if set */

	/*
	 * the global_seq counts connections I (attempt to) initiate
	 * in order to disambiguate certain connect race conditions.
	 */
	u32 global_seq;
	spinlock_t global_seq_lock;      /* protects global_seq */
};
71
72/*
73 * a single message. it contains a header (src, dest, message type, etc.),
74 * footer (crc values, mainly), a "front" message body, and possibly a
75 * data payload (stored in some number of pages).
76 */
77struct ceph_msg {
78 struct ceph_msg_header hdr; /* header */
79 struct ceph_msg_footer footer; /* footer */
80 struct kvec front; /* unaligned blobs of message */
81 struct ceph_buffer *middle;
82 struct page **pages; /* data payload. NOT OWNER. */
83 unsigned nr_pages; /* size of page array */
84 struct ceph_pagelist *pagelist; /* instead of pages */
85 struct list_head list_head;
86 struct kref kref;
87 bool front_is_vmalloc;
88 bool more_to_follow;
89 int front_max;
90
91 struct ceph_msgpool *pool;
92};
93
/* progress cursor for a partially sent/received message */
struct ceph_msg_pos {
	int page, page_pos;  /* which page; offset in page */
	int data_pos;        /* offset in data payload */
	int did_page_crc;    /* true if we've calculated crc for current page */
};
99
/* ceph connection fault delay defaults, for exponential backoff */
#define BASE_DELAY_INTERVAL	(HZ/2)
#define MAX_DELAY_INTERVAL	(5 * 60 * HZ)

/*
 * ceph_connection state bit flags (used with test/set_bit on con->state)
 *
 * QUEUED and BUSY are used together to ensure that only a single
 * thread is currently opening, reading or writing data to the socket.
 *
 * NOTE(review): some bit numbers (7, 9, 12) are skipped; presumably
 * historical — do not renumber.
 */
#define LOSSYTX         0  /* we can close channel or drop messages on errors */
#define CONNECTING	1
#define NEGOTIATING	2
#define KEEPALIVE_PENDING      3
#define WRITE_PENDING	4  /* we have data ready to send */
#define QUEUED          5  /* there is work queued on this connection */
#define BUSY            6  /* work is being done */
#define STANDBY		8  /* no outgoing messages, socket closed.  we keep
			    * the ceph_connection around to maintain shared
			    * state with the peer. */
#define CLOSED		10 /* we've closed the connection */
#define SOCK_CLOSED	11 /* socket state changed to closed */
#define OPENING         13 /* open connection w/ (possibly new) peer */
#define DEAD            14 /* dead, about to kfree */
124
125/*
126 * A single connection with another host.
127 *
128 * We maintain a queue of outgoing messages, and some session state to
129 * ensure that we can preserve the lossless, ordered delivery of
130 * messages in the case of a TCP disconnect.
131 */
132struct ceph_connection {
133 void *private;
134 atomic_t nref;
135
136 const struct ceph_connection_operations *ops;
137
138 struct ceph_messenger *msgr;
139 struct socket *sock;
140 unsigned long state; /* connection state (see flags above) */
141 const char *error_msg; /* error message, if any */
142
143 struct ceph_entity_addr peer_addr; /* peer address */
144 struct ceph_entity_name peer_name; /* peer name */
145 struct ceph_entity_addr peer_addr_for_me;
146 u32 connect_seq; /* identify the most recent connection
147 attempt for this connection, client */
148 u32 peer_global_seq; /* peer's global seq for this connection */
149
150 int auth_retry; /* true if we need a newer authorizer */
151 void *auth_reply_buf; /* where to put the authorizer reply */
152 int auth_reply_buf_len;
153
154 struct mutex mutex;
155
156 /* out queue */
157 struct list_head out_queue;
158 struct list_head out_sent; /* sending or sent but unacked */
159 u64 out_seq; /* last message queued for send */
160 u64 out_seq_sent; /* last message sent */
161 bool out_keepalive_pending;
162
163 u64 in_seq, in_seq_acked; /* last message received, acked */
164
165 /* connection negotiation temps */
166 char in_banner[CEPH_BANNER_MAX_LEN];
167 union {
168 struct { /* outgoing connection */
169 struct ceph_msg_connect out_connect;
170 struct ceph_msg_connect_reply in_reply;
171 };
172 struct { /* incoming */
173 struct ceph_msg_connect in_connect;
174 struct ceph_msg_connect_reply out_reply;
175 };
176 };
177 struct ceph_entity_addr actual_peer_addr;
178
179 /* message out temps */
180 struct ceph_msg *out_msg; /* sending message (== tail of
181 out_sent) */
182 bool out_msg_done;
183 struct ceph_msg_pos out_msg_pos;
184
185 struct kvec out_kvec[8], /* sending header/footer data */
186 *out_kvec_cur;
187 int out_kvec_left; /* kvec's left in out_kvec */
188 int out_skip; /* skip this many bytes */
189 int out_kvec_bytes; /* total bytes left */
190 bool out_kvec_is_msg; /* kvec refers to out_msg */
191 int out_more; /* there is more data after the kvecs */
192 __le64 out_temp_ack; /* for writing an ack */
193
194 /* message in temps */
195 struct ceph_msg_header in_hdr;
196 struct ceph_msg *in_msg;
197 struct ceph_msg_pos in_msg_pos;
198 u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
199
200 char in_tag; /* protocol control byte */
201 int in_base_pos; /* bytes read */
202 __le64 in_temp_ack; /* for reading an ack */
203
204 struct delayed_work work; /* send|recv work */
205 unsigned long delay; /* current delay interval */
206};
207
208
209extern const char *pr_addr(const struct sockaddr_storage *ss);
210extern int ceph_parse_ips(const char *c, const char *end,
211 struct ceph_entity_addr *addr,
212 int max_count, int *count);
213
214
215extern int ceph_msgr_init(void);
216extern void ceph_msgr_exit(void);
217
218extern struct ceph_messenger *ceph_messenger_create(
219 struct ceph_entity_addr *myaddr);
220extern void ceph_messenger_destroy(struct ceph_messenger *);
221
222extern void ceph_con_init(struct ceph_messenger *msgr,
223 struct ceph_connection *con);
224extern void ceph_con_open(struct ceph_connection *con,
225 struct ceph_entity_addr *addr);
226extern void ceph_con_close(struct ceph_connection *con);
227extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
228extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
229extern void ceph_con_revoke_message(struct ceph_connection *con,
230 struct ceph_msg *msg);
231extern void ceph_con_keepalive(struct ceph_connection *con);
232extern struct ceph_connection *ceph_con_get(struct ceph_connection *con);
233extern void ceph_con_put(struct ceph_connection *con);
234
235extern struct ceph_msg *ceph_msg_new(int type, int front_len,
236 int page_len, int page_off,
237 struct page **pages);
238extern void ceph_msg_kfree(struct ceph_msg *m);
239
240
/* take a reference on a message; returns msg for call chaining */
static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
{
	kref_get(&msg->kref);
	return msg;
}
extern void ceph_msg_last_put(struct kref *kref);
/* drop a reference; ceph_msg_last_put destroys the message at zero */
static inline void ceph_msg_put(struct ceph_msg *msg)
{
	kref_put(&msg->kref, ceph_msg_last_put);
}
251
252extern void ceph_msg_dump(struct ceph_msg *msg);
253
254#endif
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
new file mode 100644
index 000000000000..890597c09d43
--- /dev/null
+++ b/fs/ceph/mon_client.c
@@ -0,0 +1,834 @@
1#include "ceph_debug.h"
2
3#include <linux/types.h>
4#include <linux/random.h>
5#include <linux/sched.h>
6
7#include "mon_client.h"
8#include "super.h"
9#include "auth.h"
10#include "decode.h"
11
12/*
13 * Interact with Ceph monitor cluster. Handle requests for new map
14 * versions, and periodically resend as needed. Also implement
15 * statfs() and umount().
16 *
17 * A small cluster of Ceph "monitors" are responsible for managing critical
18 * cluster configuration and state information. An odd number (e.g., 3, 5)
19 * of cmon daemons use a modified version of the Paxos part-time parliament
20 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
21 * list of clients who have mounted the file system.
22 *
23 * We maintain an open, active session with a monitor at all times in order to
24 * receive timely MDSMap updates. We periodically send a keepalive byte on the
25 * TCP socket to ensure we detect a failure. If the connection does break, we
26 * randomly hunt for a new monitor. Once the connection is reestablished, we
27 * resend any outstanding requests.
28 */
29
30const static struct ceph_connection_operations mon_con_ops;
31
32static int __validate_auth(struct ceph_mon_client *monc);
33
34/*
35 * Decode a monmap blob (e.g., during mount).
36 */
37struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
38{
39 struct ceph_monmap *m = NULL;
40 int i, err = -EINVAL;
41 struct ceph_fsid fsid;
42 u32 epoch, num_mon;
43 u16 version;
44 u32 len;
45
46 ceph_decode_32_safe(&p, end, len, bad);
47 ceph_decode_need(&p, end, len, bad);
48
49 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
50
51 ceph_decode_16_safe(&p, end, version, bad);
52
53 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
54 ceph_decode_copy(&p, &fsid, sizeof(fsid));
55 epoch = ceph_decode_32(&p);
56
57 num_mon = ceph_decode_32(&p);
58 ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
59
60 if (num_mon >= CEPH_MAX_MON)
61 goto bad;
62 m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
63 if (m == NULL)
64 return ERR_PTR(-ENOMEM);
65 m->fsid = fsid;
66 m->epoch = epoch;
67 m->num_mon = num_mon;
68 ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
69 for (i = 0; i < num_mon; i++)
70 ceph_decode_addr(&m->mon_inst[i].addr);
71
72 dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
73 m->num_mon);
74 for (i = 0; i < m->num_mon; i++)
75 dout("monmap_decode mon%d is %s\n", i,
76 pr_addr(&m->mon_inst[i].addr.in_addr));
77 return m;
78
79bad:
80 dout("monmap_decode failed with %d\n", err);
81 kfree(m);
82 return ERR_PTR(err);
83}
84
85/*
86 * return true if *addr is included in the monmap.
87 */
88int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
89{
90 int i;
91
92 for (i = 0; i < m->num_mon; i++)
93 if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
94 return 1;
95 return 0;
96}
97
98/*
99 * Send an auth request.
100 */
101static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
102{
103 monc->pending_auth = 1;
104 monc->m_auth->front.iov_len = len;
105 monc->m_auth->hdr.front_len = cpu_to_le32(len);
106 ceph_msg_get(monc->m_auth); /* keep our ref */
107 ceph_con_send(monc->con, monc->m_auth);
108}
109
110/*
111 * Close monitor session, if any.
112 */
113static void __close_session(struct ceph_mon_client *monc)
114{
115 if (monc->con) {
116 dout("__close_session closing mon%d\n", monc->cur_mon);
117 ceph_con_revoke(monc->con, monc->m_auth);
118 ceph_con_close(monc->con);
119 monc->cur_mon = -1;
120 monc->pending_auth = 0;
121 ceph_auth_reset(monc->auth);
122 }
123}
124
125/*
126 * Open a session with a (new) monitor.
127 */
128static int __open_session(struct ceph_mon_client *monc)
129{
130 char r;
131 int ret;
132
133 if (monc->cur_mon < 0) {
134 get_random_bytes(&r, 1);
135 monc->cur_mon = r % monc->monmap->num_mon;
136 dout("open_session num=%d r=%d -> mon%d\n",
137 monc->monmap->num_mon, r, monc->cur_mon);
138 monc->sub_sent = 0;
139 monc->sub_renew_after = jiffies; /* i.e., expired */
140 monc->want_next_osdmap = !!monc->want_next_osdmap;
141
142 dout("open_session mon%d opening\n", monc->cur_mon);
143 monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
144 monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
145 ceph_con_open(monc->con,
146 &monc->monmap->mon_inst[monc->cur_mon].addr);
147
148 /* initiatiate authentication handshake */
149 ret = ceph_auth_build_hello(monc->auth,
150 monc->m_auth->front.iov_base,
151 monc->m_auth->front_max);
152 __send_prepared_auth_request(monc, ret);
153 } else {
154 dout("open_session mon%d already open\n", monc->cur_mon);
155 }
156 return 0;
157}
158
/* true if our map subscription has passed its renewal deadline */
static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}
163
164/*
165 * Reschedule delayed work timer.
166 */
167static void __schedule_delayed(struct ceph_mon_client *monc)
168{
169 unsigned delay;
170
171 if (monc->cur_mon < 0 || __sub_expired(monc))
172 delay = 10 * HZ;
173 else
174 delay = 20 * HZ;
175 dout("__schedule_delayed after %u\n", delay);
176 schedule_delayed_work(&monc->delayed_work, delay);
177}
178
179/*
180 * Send subscribe request for mdsmap and/or osdmap.
181 */
182static void __send_subscribe(struct ceph_mon_client *monc)
183{
184 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
185 (unsigned)monc->sub_sent, __sub_expired(monc),
186 monc->want_next_osdmap);
187 if ((__sub_expired(monc) && !monc->sub_sent) ||
188 monc->want_next_osdmap == 1) {
189 struct ceph_msg *msg;
190 struct ceph_mon_subscribe_item *i;
191 void *p, *end;
192
193 msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL);
194 if (!msg)
195 return;
196
197 p = msg->front.iov_base;
198 end = p + msg->front.iov_len;
199
200 dout("__send_subscribe to 'mdsmap' %u+\n",
201 (unsigned)monc->have_mdsmap);
202 if (monc->want_next_osdmap) {
203 dout("__send_subscribe to 'osdmap' %u\n",
204 (unsigned)monc->have_osdmap);
205 ceph_encode_32(&p, 3);
206 ceph_encode_string(&p, end, "osdmap", 6);
207 i = p;
208 i->have = cpu_to_le64(monc->have_osdmap);
209 i->onetime = 1;
210 p += sizeof(*i);
211 monc->want_next_osdmap = 2; /* requested */
212 } else {
213 ceph_encode_32(&p, 2);
214 }
215 ceph_encode_string(&p, end, "mdsmap", 6);
216 i = p;
217 i->have = cpu_to_le64(monc->have_mdsmap);
218 i->onetime = 0;
219 p += sizeof(*i);
220 ceph_encode_string(&p, end, "monmap", 6);
221 i = p;
222 i->have = 0;
223 i->onetime = 0;
224 p += sizeof(*i);
225
226 msg->front.iov_len = p - msg->front.iov_base;
227 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
228 ceph_con_send(monc->con, msg);
229
230 monc->sub_sent = jiffies | 1; /* never 0 */
231 }
232}
233
/*
 * Handle a subscribe ack: the monitor confirmed our subscription and
 * told us how long it lasts.  Also marks the session established if we
 * were still hunting for a usable monitor.
 */
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	/* schedule renewal at half the advertised duration */
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}
259
260/*
261 * Keep track of which maps we have
262 */
263int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
264{
265 mutex_lock(&monc->mutex);
266 monc->have_mdsmap = got;
267 mutex_unlock(&monc->mutex);
268 return 0;
269}
270
/* record the osdmap epoch we now hold and clear any pending want;
 * always returns 0 */
int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}
279
280/*
281 * Register interest in the next osdmap
282 */
283void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
284{
285 dout("request_next_osdmap have %u\n", monc->have_osdmap);
286 mutex_lock(&monc->mutex);
287 if (!monc->want_next_osdmap)
288 monc->want_next_osdmap = 1;
289 if (monc->want_next_osdmap < 2)
290 __send_subscribe(monc);
291 mutex_unlock(&monc->mutex);
292}
293
294/*
295 *
296 */
297int ceph_monc_open_session(struct ceph_mon_client *monc)
298{
299 if (!monc->con) {
300 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
301 if (!monc->con)
302 return -ENOMEM;
303 ceph_con_init(monc->client->msgr, monc->con);
304 monc->con->private = monc;
305 monc->con->ops = &mon_con_ops;
306 }
307
308 mutex_lock(&monc->mutex);
309 __open_session(monc);
310 __schedule_delayed(monc);
311 mutex_unlock(&monc->mutex);
312 return 0;
313}
314
315/*
316 * The monitor responds with mount ack indicate mount success. The
317 * included client ticket allows the client to talk to MDSs and OSDs.
318 */
319static void ceph_monc_handle_map(struct ceph_mon_client *monc,
320 struct ceph_msg *msg)
321{
322 struct ceph_client *client = monc->client;
323 struct ceph_monmap *monmap = NULL, *old = monc->monmap;
324 void *p, *end;
325
326 mutex_lock(&monc->mutex);
327
328 dout("handle_monmap\n");
329 p = msg->front.iov_base;
330 end = p + msg->front.iov_len;
331
332 monmap = ceph_monmap_decode(p, end);
333 if (IS_ERR(monmap)) {
334 pr_err("problem decoding monmap, %d\n",
335 (int)PTR_ERR(monmap));
336 goto out;
337 }
338
339 if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
340 kfree(monmap);
341 goto out;
342 }
343
344 client->monc.monmap = monmap;
345 kfree(old);
346
347out:
348 mutex_unlock(&monc->mutex);
349 wake_up(&client->auth_wq);
350}
351
352/*
353 * statfs
354 */
355static struct ceph_mon_statfs_request *__lookup_statfs(
356 struct ceph_mon_client *monc, u64 tid)
357{
358 struct ceph_mon_statfs_request *req;
359 struct rb_node *n = monc->statfs_request_tree.rb_node;
360
361 while (n) {
362 req = rb_entry(n, struct ceph_mon_statfs_request, node);
363 if (tid < req->tid)
364 n = n->rb_left;
365 else if (tid > req->tid)
366 n = n->rb_right;
367 else
368 return req;
369 }
370 return NULL;
371}
372
373static void __insert_statfs(struct ceph_mon_client *monc,
374 struct ceph_mon_statfs_request *new)
375{
376 struct rb_node **p = &monc->statfs_request_tree.rb_node;
377 struct rb_node *parent = NULL;
378 struct ceph_mon_statfs_request *req = NULL;
379
380 while (*p) {
381 parent = *p;
382 req = rb_entry(parent, struct ceph_mon_statfs_request, node);
383 if (new->tid < req->tid)
384 p = &(*p)->rb_left;
385 else if (new->tid > req->tid)
386 p = &(*p)->rb_right;
387 else
388 BUG();
389 }
390
391 rb_link_node(&new->node, parent, p);
392 rb_insert_color(&new->node, &monc->statfs_request_tree);
393}
394
/*
 * Handle a statfs reply: match it to the pending request by tid, copy
 * the result into the caller's buffer, and complete the waiter.
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_statfs_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid;

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_statfs(monc, tid);
	if (req) {
		*req->buf = reply->st;
		req->result = 0;
	}
	mutex_unlock(&monc->mutex);
	/* complete outside the mutex; req lives on the waiter's stack */
	if (req)
		complete(&req->completion);
	return;

bad:
	pr_err("corrupt statfs reply, no tid\n");
	ceph_msg_dump(msg);
}
422
423/*
424 * (re)send a statfs request
425 */
426static int send_statfs(struct ceph_mon_client *monc,
427 struct ceph_mon_statfs_request *req)
428{
429 struct ceph_msg *msg;
430 struct ceph_mon_statfs *h;
431
432 dout("send_statfs tid %llu\n", req->tid);
433 msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
434 if (IS_ERR(msg))
435 return PTR_ERR(msg);
436 req->request = msg;
437 msg->hdr.tid = cpu_to_le64(req->tid);
438 h = msg->front.iov_base;
439 h->monhdr.have_version = 0;
440 h->monhdr.session_mon = cpu_to_le16(-1);
441 h->monhdr.session_mon_tid = 0;
442 h->fsid = monc->monmap->fsid;
443 ceph_con_send(monc->con, msg);
444 return 0;
445}
446
447/*
448 * Do a synchronous statfs().
449 */
450int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
451{
452 struct ceph_mon_statfs_request req;
453 int err;
454
455 req.buf = buf;
456 init_completion(&req.completion);
457
458 /* allocate memory for reply */
459 err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1);
460 if (err)
461 return err;
462
463 /* register request */
464 mutex_lock(&monc->mutex);
465 req.tid = ++monc->last_tid;
466 req.last_attempt = jiffies;
467 req.delay = BASE_DELAY_INTERVAL;
468 __insert_statfs(monc, &req);
469 monc->num_statfs_requests++;
470 mutex_unlock(&monc->mutex);
471
472 /* send request and wait */
473 err = send_statfs(monc, &req);
474 if (!err)
475 err = wait_for_completion_interruptible(&req.completion);
476
477 mutex_lock(&monc->mutex);
478 rb_erase(&req.node, &monc->statfs_request_tree);
479 monc->num_statfs_requests--;
480 ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1);
481 mutex_unlock(&monc->mutex);
482
483 if (!err)
484 err = req.result;
485 return err;
486}
487
488/*
489 * Resend pending statfs requests.
490 */
491static void __resend_statfs(struct ceph_mon_client *monc)
492{
493 struct ceph_mon_statfs_request *req;
494 struct rb_node *p;
495
496 for (p = rb_first(&monc->statfs_request_tree); p; p = rb_next(p)) {
497 req = rb_entry(p, struct ceph_mon_statfs_request, node);
498 send_statfs(monc, req);
499 }
500}
501
502/*
503 * Delayed work. If we haven't mounted yet, retry. Otherwise,
504 * renew/retry subscription as needed (in case it is timing out, or we
505 * got an ENOMEM). And keep the monitor connection alive.
506 */
507static void delayed_work(struct work_struct *work)
508{
509 struct ceph_mon_client *monc =
510 container_of(work, struct ceph_mon_client, delayed_work.work);
511
512 dout("monc delayed_work\n");
513 mutex_lock(&monc->mutex);
514 if (monc->hunting) {
515 __close_session(monc);
516 __open_session(monc); /* continue hunting */
517 } else {
518 ceph_con_keepalive(monc->con);
519
520 __validate_auth(monc);
521
522 if (monc->auth->ops->is_authenticated(monc->auth))
523 __send_subscribe(monc);
524 }
525 __schedule_delayed(monc);
526 mutex_unlock(&monc->mutex);
527}
528
529/*
530 * On startup, we build a temporary monmap populated with the IPs
531 * provided by mount(2).
532 */
533static int build_initial_monmap(struct ceph_mon_client *monc)
534{
535 struct ceph_mount_args *args = monc->client->mount_args;
536 struct ceph_entity_addr *mon_addr = args->mon_addr;
537 int num_mon = args->num_mon;
538 int i;
539
540 /* build initial monmap */
541 monc->monmap = kzalloc(sizeof(*monc->monmap) +
542 num_mon*sizeof(monc->monmap->mon_inst[0]),
543 GFP_KERNEL);
544 if (!monc->monmap)
545 return -ENOMEM;
546 for (i = 0; i < num_mon; i++) {
547 monc->monmap->mon_inst[i].addr = mon_addr[i];
548 monc->monmap->mon_inst[i].addr.nonce = 0;
549 monc->monmap->mon_inst[i].name.type =
550 CEPH_ENTITY_TYPE_MON;
551 monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
552 }
553 monc->monmap->num_mon = num_mon;
554 monc->have_fsid = false;
555
556 /* release addr memory */
557 kfree(args->mon_addr);
558 args->mon_addr = NULL;
559 args->num_mon = 0;
560 return 0;
561}
562
563int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
564{
565 int err = 0;
566
567 dout("init\n");
568 memset(monc, 0, sizeof(*monc));
569 monc->client = cl;
570 monc->monmap = NULL;
571 mutex_init(&monc->mutex);
572
573 err = build_initial_monmap(monc);
574 if (err)
575 goto out;
576
577 monc->con = NULL;
578
579 /* authentication */
580 monc->auth = ceph_auth_init(cl->mount_args->name,
581 cl->mount_args->secret);
582 if (IS_ERR(monc->auth))
583 return PTR_ERR(monc->auth);
584 monc->auth->want_keys =
585 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
586 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
587
588 /* msg pools */
589 err = ceph_msgpool_init(&monc->msgpool_subscribe_ack,
590 sizeof(struct ceph_mon_subscribe_ack), 1, false);
591 if (err < 0)
592 goto out_monmap;
593 err = ceph_msgpool_init(&monc->msgpool_statfs_reply,
594 sizeof(struct ceph_mon_statfs_reply), 0, false);
595 if (err < 0)
596 goto out_pool1;
597 err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false);
598 if (err < 0)
599 goto out_pool2;
600
601 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
602 monc->pending_auth = 0;
603 if (IS_ERR(monc->m_auth)) {
604 err = PTR_ERR(monc->m_auth);
605 monc->m_auth = NULL;
606 goto out_pool3;
607 }
608
609 monc->cur_mon = -1;
610 monc->hunting = true;
611 monc->sub_renew_after = jiffies;
612 monc->sub_sent = 0;
613
614 INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
615 monc->statfs_request_tree = RB_ROOT;
616 monc->num_statfs_requests = 0;
617 monc->last_tid = 0;
618
619 monc->have_mdsmap = 0;
620 monc->have_osdmap = 0;
621 monc->want_next_osdmap = 1;
622 return 0;
623
624out_pool3:
625 ceph_msgpool_destroy(&monc->msgpool_auth_reply);
626out_pool2:
627 ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
628out_pool1:
629 ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
630out_monmap:
631 kfree(monc->monmap);
632out:
633 return err;
634}
635
/*
 * Tear down the monitor client: stop the delayed work, close the
 * session and drop the connection, then free auth state, the
 * preallocated auth message, the msg pools, and the monmap.
 */
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	/* make sure delayed_work isn't running before we tear down */
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	if (monc->con) {
		monc->con->private = NULL;
		monc->con->ops->put(monc->con);
		monc->con = NULL;
	}
	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
	ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
	ceph_msgpool_destroy(&monc->msgpool_auth_reply);

	kfree(monc->monmap);
}
659
/*
 * Process an auth reply from the monitor: feed it to the auth layer
 * and either record a fatal error, continue the handshake, or (once
 * authenticated) start the session proper.
 */
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;

	mutex_lock(&monc->mutex);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
	if (ret < 0) {
		/* hard failure: record it and wake whoever waits on auth */
		monc->client->auth_err = ret;
		wake_up(&monc->client->auth_wq);
	} else if (ret > 0) {
		/* handshake continues; another request was prepared
		 * in m_auth (ret presumably its length — see
		 * ceph_handle_auth_reply) */
		__send_prepared_auth_request(monc, ret);
	} else if (monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		/* we now know our global id; brand the messenger with it */
		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num = monc->auth->global_id;

		__send_subscribe(monc);
		__resend_statfs(monc);
	}
	mutex_unlock(&monc->mutex);
}
687
688static int __validate_auth(struct ceph_mon_client *monc)
689{
690 int ret;
691
692 if (monc->pending_auth)
693 return 0;
694
695 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
696 monc->m_auth->front_max);
697 if (ret <= 0)
698 return ret; /* either an error, or no need to authenticate */
699 __send_prepared_auth_request(monc, ret);
700 return 0;
701}
702
703int ceph_monc_validate_auth(struct ceph_mon_client *monc)
704{
705 int ret;
706
707 mutex_lock(&monc->mutex);
708 ret = __validate_auth(monc);
709 mutex_unlock(&monc->mutex);
710 return ret;
711}
712
713/*
714 * handle incoming message
715 */
716static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
717{
718 struct ceph_mon_client *monc = con->private;
719 int type = le16_to_cpu(msg->hdr.type);
720
721 if (!monc)
722 return;
723
724 switch (type) {
725 case CEPH_MSG_AUTH_REPLY:
726 handle_auth_reply(monc, msg);
727 break;
728
729 case CEPH_MSG_MON_SUBSCRIBE_ACK:
730 handle_subscribe_ack(monc, msg);
731 break;
732
733 case CEPH_MSG_STATFS_REPLY:
734 handle_statfs_reply(monc, msg);
735 break;
736
737 case CEPH_MSG_MON_MAP:
738 ceph_monc_handle_map(monc, msg);
739 break;
740
741 case CEPH_MSG_MDS_MAP:
742 ceph_mdsc_handle_map(&monc->client->mdsc, msg);
743 break;
744
745 case CEPH_MSG_OSD_MAP:
746 ceph_osdc_handle_map(&monc->client->osdc, msg);
747 break;
748
749 default:
750 pr_err("received unknown message type %d %s\n", type,
751 ceph_msg_type_name(type));
752 }
753 ceph_msg_put(msg);
754}
755
756/*
757 * Allocate memory for incoming message
758 */
759static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
760 struct ceph_msg_header *hdr,
761 int *skip)
762{
763 struct ceph_mon_client *monc = con->private;
764 int type = le16_to_cpu(hdr->type);
765 int front_len = le32_to_cpu(hdr->front_len);
766 struct ceph_msg *m = NULL;
767
768 *skip = 0;
769
770 switch (type) {
771 case CEPH_MSG_MON_SUBSCRIBE_ACK:
772 m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len);
773 break;
774 case CEPH_MSG_STATFS_REPLY:
775 m = ceph_msgpool_get(&monc->msgpool_statfs_reply, front_len);
776 break;
777 case CEPH_MSG_AUTH_REPLY:
778 m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len);
779 break;
780 case CEPH_MSG_MON_MAP:
781 case CEPH_MSG_MDS_MAP:
782 case CEPH_MSG_OSD_MAP:
783 m = ceph_msg_new(type, front_len, 0, 0, NULL);
784 break;
785 }
786
787 if (!m) {
788 pr_info("alloc_msg unknown type %d\n", type);
789 *skip = 1;
790 }
791 return m;
792}
793
794/*
795 * If the monitor connection resets, pick a new monitor and resubmit
796 * any pending requests.
797 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	/* re-check under the mutex: ceph_monc_stop() clears con->private */
	if (!con->private)
		goto out;

	if (monc->con && !monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting: open a session to another monitor */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}
827
828const static struct ceph_connection_operations mon_con_ops = {
829 .get = ceph_con_get,
830 .put = ceph_con_put,
831 .dispatch = dispatch,
832 .fault = mon_fault,
833 .alloc_msg = mon_alloc_msg,
834};
diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h
new file mode 100644
index 000000000000..b958ad5afa06
--- /dev/null
+++ b/fs/ceph/mon_client.h
@@ -0,0 +1,119 @@
1#ifndef _FS_CEPH_MON_CLIENT_H
2#define _FS_CEPH_MON_CLIENT_H
3
4#include <linux/completion.h>
5#include <linux/rbtree.h>
6
7#include "messenger.h"
8#include "msgpool.h"
9
10struct ceph_client;
11struct ceph_mount_args;
12struct ceph_auth_client;
13
14/*
15 * The monitor map enumerates the set of all monitors.
16 */
17struct ceph_monmap {
18 struct ceph_fsid fsid;
19 u32 epoch;
20 u32 num_mon;
21 struct ceph_entity_inst mon_inst[0];
22};
23
24struct ceph_mon_client;
25struct ceph_mon_statfs_request;
26
27
28/*
29 * Generic mechanism for resending monitor requests.
30 */
31typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
32 int newmon);
33
/* a pending monitor request */
struct ceph_mon_request {
	struct ceph_mon_client *monc;		/* owning mon client */
	struct delayed_work delayed_work;	/* drives periodic resend */
	unsigned long delay;			/* current resend delay (jiffies) */
	ceph_monc_request_func_t do_request;	/* (re)issues the request */
};
41
42/*
43 * statfs() is done a bit differently because we need to get data back
44 * to the caller
45 */
struct ceph_mon_statfs_request {
	u64 tid;			/* transaction id; rbtree key */
	struct rb_node node;		/* node in monc->statfs_request_tree */
	int result;			/* reply status code */
	struct ceph_statfs *buf;	/* caller's buffer to fill in */
	struct completion completion;	/* completed when the reply arrives */
	unsigned long last_attempt, delay; /* jiffies */
	struct ceph_msg *request; /* original request */
};
55
struct ceph_mon_client {
	struct ceph_client *client;
	struct ceph_monmap *monmap;

	struct mutex mutex;		/* protects the state below */
	struct delayed_work delayed_work;

	struct ceph_auth_client *auth;
	struct ceph_msg *m_auth;	/* preallocated auth request message */
	int pending_auth;		/* nonzero while an auth request is in flight */

	bool hunting;			/* searching for a responsive monitor */
	int cur_mon;                       /* last monitor i contacted */
	unsigned long sub_sent, sub_renew_after;
	struct ceph_connection *con;	/* connection to the current monitor */
	bool have_fsid;

	/* msg pools */
	struct ceph_msgpool msgpool_subscribe_ack;
	struct ceph_msgpool msgpool_statfs_reply;
	struct ceph_msgpool msgpool_auth_reply;

	/* pending statfs requests */
	struct rb_root statfs_request_tree;
	int num_statfs_requests;
	u64 last_tid;			/* last statfs transaction id issued */

	/* mds/osd map */
	int want_next_osdmap; /* 1 = want, 2 = want+asked */
	u32 have_osdmap, have_mdsmap;	/* map epochs we already hold */

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_file;
#endif
};
91
92extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
93extern int ceph_monmap_contains(struct ceph_monmap *m,
94 struct ceph_entity_addr *addr);
95
96extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
97extern void ceph_monc_stop(struct ceph_mon_client *monc);
98
99/*
100 * The model here is to indicate that we need a new map of at least
101 * epoch @want, and also call in when we receive a map. We will
102 * periodically rerequest the map from the monitor cluster until we
103 * get what we want.
104 */
105extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
106extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
107
108extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
109
110extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
111 struct ceph_statfs *buf);
112
113extern int ceph_monc_open_session(struct ceph_mon_client *monc);
114
115extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
116
117
118
119#endif
diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c
new file mode 100644
index 000000000000..ca3b44a89f2d
--- /dev/null
+++ b/fs/ceph/msgpool.c
@@ -0,0 +1,186 @@
1#include "ceph_debug.h"
2
3#include <linux/err.h>
4#include <linux/sched.h>
5#include <linux/types.h>
6#include <linux/vmalloc.h>
7
8#include "msgpool.h"
9
10/*
11 * We use msg pools to preallocate memory for messages we expect to
12 * receive over the wire, to avoid getting ourselves into OOM
13 * conditions at unexpected times. We take use a few different
14 * strategies:
15 *
16 * - for request/response type interactions, we preallocate the
17 * memory needed for the response when we generate the request.
18 *
19 * - for messages we can receive at any time from the MDS, we preallocate
20 * a pool of messages we can re-use.
21 *
22 * - for writeback, we preallocate some number of messages to use for
23 * requests and their replies, so that we always make forward
24 * progress.
25 *
26 * The msgpool behaves like a mempool_t, but keeps preallocated
27 * ceph_msgs strung together on a list_head instead of using a pointer
28 * vector. This avoids vector reallocation when we adjust the number
29 * of preallocated items (which happens frequently).
30 */
31
32
33/*
34 * Allocate or release as necessary to meet our target pool size.
35 */
/*
 * Allocate or release as necessary to meet our target pool size.
 * Called with pool->lock held; the lock is dropped around each
 * allocation so we may sleep.
 *
 * NOTE(review): because the lock is dropped, pool->num/min can change
 * between iterations; the loop conditions re-check under the lock,
 * but concurrent fillers could overshoot — confirm callers serialize.
 */
static int __fill_msgpool(struct ceph_msgpool *pool)
{
	struct ceph_msg *msg;

	while (pool->num < pool->min) {
		dout("fill_msgpool %p %d/%d allocating\n", pool, pool->num,
		     pool->min);
		spin_unlock(&pool->lock);
		msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
		spin_lock(&pool->lock);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		msg->pool = pool;
		list_add(&msg->list_head, &pool->msgs);
		pool->num++;
	}
	/* shrink: free surplus preallocated messages */
	while (pool->num > pool->min) {
		msg = list_first_entry(&pool->msgs, struct ceph_msg, list_head);
		dout("fill_msgpool %p %d/%d releasing %p\n", pool, pool->num,
		     pool->min, msg);
		list_del_init(&msg->list_head);
		pool->num--;
		ceph_msg_kfree(msg);
	}
	return 0;
}
62
63int ceph_msgpool_init(struct ceph_msgpool *pool,
64 int front_len, int min, bool blocking)
65{
66 int ret;
67
68 dout("msgpool_init %p front_len %d min %d\n", pool, front_len, min);
69 spin_lock_init(&pool->lock);
70 pool->front_len = front_len;
71 INIT_LIST_HEAD(&pool->msgs);
72 pool->num = 0;
73 pool->min = min;
74 pool->blocking = blocking;
75 init_waitqueue_head(&pool->wait);
76
77 spin_lock(&pool->lock);
78 ret = __fill_msgpool(pool);
79 spin_unlock(&pool->lock);
80 return ret;
81}
82
/*
 * Free all preallocated messages by dropping the target size to zero
 * and letting the fill pass release the surplus.
 */
void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
	dout("msgpool_destroy %p\n", pool);
	spin_lock(&pool->lock);
	pool->min = 0;
	__fill_msgpool(pool);
	spin_unlock(&pool->lock);
}
91
92int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta)
93{
94 int ret;
95
96 spin_lock(&pool->lock);
97 dout("msgpool_resv %p delta %d\n", pool, delta);
98 pool->min += delta;
99 ret = __fill_msgpool(pool);
100 spin_unlock(&pool->lock);
101 return ret;
102}
103
/*
 * Take a message from the pool, or allocate a fresh one.  A
 * @front_len of 0 means "use the pool's preallocated size".  Blocking
 * pools sleep until ceph_msgpool_put() returns a message; non-blocking
 * pools may return ERR_PTR(-ENOMEM).
 */
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
{
	wait_queue_t wait;
	struct ceph_msg *msg;

	if (front_len && front_len > pool->front_len) {
		/* caller wants more than we preallocated: complain, then
		 * try a one-off allocation outside the pool */
		pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
		       pool, front_len, pool->front_len);
		WARN_ON(1);

		/* try to alloc a fresh message */
		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
		if (!IS_ERR(msg))
			return msg;
	}

	if (!front_len)
		front_len = pool->front_len;

	if (pool->blocking) {
		/* mempool_t behavior; first try to alloc */
		msg = ceph_msg_new(0, front_len, 0, 0, NULL);
		if (!IS_ERR(msg))
			return msg;
	}

	while (1) {
		spin_lock(&pool->lock);
		if (likely(pool->num)) {
			/* pop a preallocated message off the pool list */
			msg = list_entry(pool->msgs.next, struct ceph_msg,
					 list_head);
			list_del_init(&msg->list_head);
			pool->num--;
			dout("msgpool_get %p got %p, now %d/%d\n", pool, msg,
			     pool->num, pool->min);
			spin_unlock(&pool->lock);
			return msg;
		}
		pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num,
		       pool->min, pool->blocking ? "waiting" : "may fail");
		spin_unlock(&pool->lock);

		if (!pool->blocking) {
			WARN_ON(1);

			/* maybe we can allocate it now? */
			msg = ceph_msg_new(0, front_len, 0, 0, NULL);
			if (!IS_ERR(msg))
				return msg;

			pr_err("msgpool_get %p empty + alloc failed\n", pool);
			return ERR_PTR(-ENOMEM);
		}

		/* blocking pool: sleep until a put wakes us, then retry */
		init_wait(&wait);
		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
		schedule();
		finish_wait(&pool->wait, &wait);
	}
}
164
/*
 * Return a message to the pool.  If the pool is below its reserve,
 * the message is recycled (front length reset, refcount reset to one)
 * and any blocked ceph_msgpool_get() waiter is woken; otherwise the
 * message is freed.
 */
void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
	spin_lock(&pool->lock);
	if (pool->num < pool->min) {
		/* reset msg front_len; user may have changed it */
		msg->front.iov_len = pool->front_len;
		msg->hdr.front_len = cpu_to_le32(pool->front_len);

		kref_set(&msg->kref, 1); /* retake a single ref */
		list_add(&msg->list_head, &pool->msgs);
		pool->num++;
		dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg,
		     pool->num, pool->min);
		spin_unlock(&pool->lock);
		wake_up(&pool->wait);
	} else {
		dout("msgpool_put %p drop %p, at %d/%d\n", pool, msg,
		     pool->num, pool->min);
		spin_unlock(&pool->lock);
		ceph_msg_kfree(msg);
	}
}
diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h
new file mode 100644
index 000000000000..bc834bfcd720
--- /dev/null
+++ b/fs/ceph/msgpool.h
@@ -0,0 +1,27 @@
1#ifndef _FS_CEPH_MSGPOOL
2#define _FS_CEPH_MSGPOOL
3
4#include "messenger.h"
5
6/*
7 * we use memory pools for preallocating messages we may receive, to
8 * avoid unexpected OOM conditions.
9 */
struct ceph_msgpool {
	spinlock_t lock;	/* protects msgs, num, min */
	int front_len; /* preallocated payload size */
	struct list_head msgs; /* msgs in the pool; each has 1 ref */
	int num, min; /* cur, min # msgs in the pool */
	bool blocking;		/* true: get() sleeps when empty */
	wait_queue_head_t wait;	/* woken when a msg is put back */
};
18
19extern int ceph_msgpool_init(struct ceph_msgpool *pool,
20 int front_len, int size, bool blocking);
21extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
22extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta);
23extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
24 int front_len);
25extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
26
27#endif
diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h
new file mode 100644
index 000000000000..8aaab414f3f8
--- /dev/null
+++ b/fs/ceph/msgr.h
@@ -0,0 +1,158 @@
1#ifndef __MSGR_H
2#define __MSGR_H
3
4/*
5 * Data types for message passing layer used by Ceph.
6 */
7
8#define CEPH_MON_PORT 6789 /* default monitor port */
9
10/*
11 * client-side processes will try to bind to ports in this
12 * range, simply for the benefit of tools like nmap or wireshark
13 * that would like to identify the protocol.
14 */
15#define CEPH_PORT_FIRST 6789
16#define CEPH_PORT_START 6800 /* non-monitors start here */
17#define CEPH_PORT_LAST 6900
18
19/*
20 * tcp connection banner. include a protocol version. and adjust
21 * whenever the wire protocol changes. try to keep this string length
22 * constant.
23 */
24#define CEPH_BANNER "ceph v027"
25#define CEPH_BANNER_MAX_LEN 30
26
27
28/*
29 * Rollover-safe type and comparator for 32-bit sequence numbers.
30 * Comparator returns -1, 0, or 1.
31 */
32typedef __u32 ceph_seq_t;
33
34static inline __s32 ceph_seq_cmp(__u32 a, __u32 b)
35{
36 return (__s32)a - (__s32)b;
37}
38
39
40/*
41 * entity_name -- logical name for a process participating in the
42 * network, e.g. 'mds0' or 'osd3'.
43 */
44struct ceph_entity_name {
45 __u8 type; /* CEPH_ENTITY_TYPE_* */
46 __le64 num;
47} __attribute__ ((packed));
48
49#define CEPH_ENTITY_TYPE_MON 0x01
50#define CEPH_ENTITY_TYPE_MDS 0x02
51#define CEPH_ENTITY_TYPE_OSD 0x04
52#define CEPH_ENTITY_TYPE_CLIENT 0x08
53#define CEPH_ENTITY_TYPE_ADMIN 0x10
54#define CEPH_ENTITY_TYPE_AUTH 0x20
55
56#define CEPH_ENTITY_TYPE_ANY 0xFF
57
58extern const char *ceph_entity_type_name(int type);
59
60/*
61 * entity_addr -- network address
62 */
63struct ceph_entity_addr {
64 __le32 type;
65 __le32 nonce; /* unique id for process (e.g. pid) */
66 struct sockaddr_storage in_addr;
67} __attribute__ ((packed));
68
69struct ceph_entity_inst {
70 struct ceph_entity_name name;
71 struct ceph_entity_addr addr;
72} __attribute__ ((packed));
73
74
75/* used by message exchange protocol */
76#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */
77#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */
78#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing
79 incoming connection */
80#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again
81 with higher cseq */
82#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again
83 with higher gseq */
84#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */
85#define CEPH_MSGR_TAG_MSG 7 /* message */
86#define CEPH_MSGR_TAG_ACK 8 /* message ack */
87#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
88#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
89#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
90#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
91
92
93/*
94 * connection negotiation
95 */
96struct ceph_msg_connect {
97 __le64 features; /* supported feature bits */
98 __le32 host_type; /* CEPH_ENTITY_TYPE_* */
99 __le32 global_seq; /* count connections initiated by this host */
100 __le32 connect_seq; /* count connections initiated in this session */
101 __le32 protocol_version;
102 __le32 authorizer_protocol;
103 __le32 authorizer_len;
104 __u8 flags; /* CEPH_MSG_CONNECT_* */
105} __attribute__ ((packed));
106
107struct ceph_msg_connect_reply {
108 __u8 tag;
109 __le64 features; /* feature bits for this session */
110 __le32 global_seq;
111 __le32 connect_seq;
112 __le32 protocol_version;
113 __le32 authorizer_len;
114 __u8 flags;
115} __attribute__ ((packed));
116
117#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */
118
119
120/*
121 * message header
122 */
123struct ceph_msg_header {
124 __le64 seq; /* message seq# for this session */
125 __le64 tid; /* transaction id */
126 __le16 type; /* message type */
127 __le16 priority; /* priority. higher value == higher priority */
128 __le16 version; /* version of message encoding */
129
130 __le32 front_len; /* bytes in main payload */
131 __le32 middle_len;/* bytes in middle payload */
132 __le32 data_len; /* bytes of data payload */
133 __le16 data_off; /* sender: include full offset;
134 receiver: mask against ~PAGE_MASK */
135
136 struct ceph_entity_inst src, orig_src;
137 __le32 reserved;
138 __le32 crc; /* header crc32c */
139} __attribute__ ((packed));
140
141#define CEPH_MSG_PRIO_LOW 64
142#define CEPH_MSG_PRIO_DEFAULT 127
143#define CEPH_MSG_PRIO_HIGH 196
144#define CEPH_MSG_PRIO_HIGHEST 255
145
146/*
147 * follows data payload
148 */
149struct ceph_msg_footer {
150 __le32 front_crc, middle_crc, data_crc;
151 __u8 flags;
152} __attribute__ ((packed));
153
154#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
155#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
156
157
158#endif
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
new file mode 100644
index 000000000000..dbe63db9762f
--- /dev/null
+++ b/fs/ceph/osd_client.c
@@ -0,0 +1,1537 @@
1#include "ceph_debug.h"
2
3#include <linux/err.h>
4#include <linux/highmem.h>
5#include <linux/mm.h>
6#include <linux/pagemap.h>
7#include <linux/slab.h>
8#include <linux/uaccess.h>
9
10#include "super.h"
11#include "osd_client.h"
12#include "messenger.h"
13#include "decode.h"
14#include "auth.h"
15
16#define OSD_OP_FRONT_LEN 4096
17#define OSD_OPREPLY_FRONT_LEN 512
18
19const static struct ceph_connection_operations osd_con_ops;
20static int __kick_requests(struct ceph_osd_client *osdc,
21 struct ceph_osd *kickosd);
22
23static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
24
25/*
26 * Implement client access to distributed object storage cluster.
27 *
28 * All data objects are stored within a cluster/cloud of OSDs, or
29 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
30 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
31 * remote daemons serving up and coordinating consistent and safe
32 * access to storage.
33 *
34 * Cluster membership and the mapping of data objects onto storage devices
35 * are described by the osd map.
36 *
37 * We keep track of pending OSD requests (read, write), resubmit
38 * requests to different OSDs when the cluster topology/data layout
39 * change, or retry the affected requests when the communications
40 * channel with an OSD is reset.
41 */
42
43/*
44 * calculate the mapping of a file extent onto an object, and fill out the
45 * request accordingly. shorten extent as necessary if it crosses an
46 * object boundary.
47 *
48 * fill osd op in request message.
49 */
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly. shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_osd_op *op = (void *)(reqhead + 1);
	u64 orig_len = *plen;	/* requested length, before clipping */
	u64 objoff, objlen; /* extent in object */
	u64 bno;		/* block (object) number within the file */

	reqhead->snapid = cpu_to_le64(vino.snap);

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, plen, &bno,
				      &objoff, &objlen);
	if (*plen < orig_len)
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);

	/* object name is "<ino hex>.<bno hex>"; bounded, fits r_oid */
	sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
	req->r_oid_len = strlen(req->r_oid);

	op->extent.offset = cpu_to_le64(objoff);
	op->extent.length = cpu_to_le64(objlen);
	req->r_num_pages = calc_pages_for(off, *plen);

	dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
	     req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
80
81/*
82 * requests
83 */
/*
 * Destructor invoked by kref_put() when the last reference to an osd
 * request drops: release messages, revoke any in-flight page fill,
 * free the page vector if we own it, and free the request itself.
 */
void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
						    struct ceph_osd_request,
						    r_kref);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_con_filling_msg) {
		/* a connection was reading a reply into our pages; make
		 * sure it stops before the pages go away */
		dout("release_request revoking pages %p from con %p\n",
		     req->r_pages, req->r_con_filling_msg);
		ceph_con_revoke_message(req->r_con_filling_msg,
					req->r_reply);
		ceph_con_put(req->r_con_filling_msg);
	}
	if (req->r_own_pages)
		ceph_release_page_vector(req->r_pages,
					 req->r_num_pages);
	ceph_put_snap_context(req->r_snapc);
	/* free from whichever allocator the request came from */
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else
		kfree(req);
}
110
111/*
112 * build new request AND message, calculate layout, and adjust file
113 * extent as needed.
114 *
115 * if the file was recently truncated, we include information about its
116 * old and new size so that the object can be updated appropriately. (we
117 * avoid synchronously deleting truncated objects because it's slow.)
118 *
119 * if @do_sync, include a 'startsync' command so that the osd will flush
120 * data quickly.
121 */
122struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
123 struct ceph_file_layout *layout,
124 struct ceph_vino vino,
125 u64 off, u64 *plen,
126 int opcode, int flags,
127 struct ceph_snap_context *snapc,
128 int do_sync,
129 u32 truncate_seq,
130 u64 truncate_size,
131 struct timespec *mtime,
132 bool use_mempool, int num_reply)
133{
134 struct ceph_osd_request *req;
135 struct ceph_msg *msg;
136 struct ceph_osd_request_head *head;
137 struct ceph_osd_op *op;
138 void *p;
139 int num_op = 1 + do_sync;
140 size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
141 int i;
142
143 if (use_mempool) {
144 req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
145 memset(req, 0, sizeof(*req));
146 } else {
147 req = kzalloc(sizeof(*req), GFP_NOFS);
148 }
149 if (req == NULL)
150 return ERR_PTR(-ENOMEM);
151
152 req->r_osdc = osdc;
153 req->r_mempool = use_mempool;
154 kref_init(&req->r_kref);
155 init_completion(&req->r_completion);
156 init_completion(&req->r_safe_completion);
157 INIT_LIST_HEAD(&req->r_unsafe_item);
158 req->r_flags = flags;
159
160 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
161
162 /* create reply message */
163 if (use_mempool)
164 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
165 else
166 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
167 OSD_OPREPLY_FRONT_LEN, 0, 0, NULL);
168 if (IS_ERR(msg)) {
169 ceph_osdc_put_request(req);
170 return ERR_PTR(PTR_ERR(msg));
171 }
172 req->r_reply = msg;
173
174 /* create request message; allow space for oid */
175 msg_size += 40;
176 if (snapc)
177 msg_size += sizeof(u64) * snapc->num_snaps;
178 if (use_mempool)
179 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
180 else
181 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
182 if (IS_ERR(msg)) {
183 ceph_osdc_put_request(req);
184 return ERR_PTR(PTR_ERR(msg));
185 }
186 msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
187 memset(msg->front.iov_base, 0, msg->front.iov_len);
188 head = msg->front.iov_base;
189 op = (void *)(head + 1);
190 p = (void *)(op + num_op);
191
192 req->r_request = msg;
193 req->r_snapc = ceph_get_snap_context(snapc);
194
195 head->client_inc = cpu_to_le32(1); /* always, for now. */
196 head->flags = cpu_to_le32(flags);
197 if (flags & CEPH_OSD_FLAG_WRITE)
198 ceph_encode_timespec(&head->mtime, mtime);
199 head->num_ops = cpu_to_le16(num_op);
200 op->op = cpu_to_le16(opcode);
201
202 /* calculate max write size */
203 calc_layout(osdc, vino, layout, off, plen, req);
204 req->r_file_layout = *layout; /* keep a copy */
205
206 if (flags & CEPH_OSD_FLAG_WRITE) {
207 req->r_request->hdr.data_off = cpu_to_le16(off);
208 req->r_request->hdr.data_len = cpu_to_le32(*plen);
209 op->payload_len = cpu_to_le32(*plen);
210 }
211 op->extent.truncate_size = cpu_to_le64(truncate_size);
212 op->extent.truncate_seq = cpu_to_le32(truncate_seq);
213
214 /* fill in oid */
215 head->object_len = cpu_to_le32(req->r_oid_len);
216 memcpy(p, req->r_oid, req->r_oid_len);
217 p += req->r_oid_len;
218
219 if (do_sync) {
220 op++;
221 op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
222 }
223 if (snapc) {
224 head->snap_seq = cpu_to_le64(snapc->seq);
225 head->num_snaps = cpu_to_le32(snapc->num_snaps);
226 for (i = 0; i < snapc->num_snaps; i++) {
227 put_unaligned_le64(snapc->snaps[i], p);
228 p += sizeof(u64);
229 }
230 }
231
232 BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
233 msg_size = p - msg->front.iov_base;
234 msg->front.iov_len = msg_size;
235 msg->hdr.front_len = cpu_to_le32(msg_size);
236 return req;
237}
238
239/*
240 * We keep osd requests in an rbtree, sorted by ->r_tid.
241 */
242static void __insert_request(struct ceph_osd_client *osdc,
243 struct ceph_osd_request *new)
244{
245 struct rb_node **p = &osdc->requests.rb_node;
246 struct rb_node *parent = NULL;
247 struct ceph_osd_request *req = NULL;
248
249 while (*p) {
250 parent = *p;
251 req = rb_entry(parent, struct ceph_osd_request, r_node);
252 if (new->r_tid < req->r_tid)
253 p = &(*p)->rb_left;
254 else if (new->r_tid > req->r_tid)
255 p = &(*p)->rb_right;
256 else
257 BUG();
258 }
259
260 rb_link_node(&new->r_node, parent, p);
261 rb_insert_color(&new->r_node, &osdc->requests);
262}
263
264static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
265 u64 tid)
266{
267 struct ceph_osd_request *req;
268 struct rb_node *n = osdc->requests.rb_node;
269
270 while (n) {
271 req = rb_entry(n, struct ceph_osd_request, r_node);
272 if (tid < req->r_tid)
273 n = n->rb_left;
274 else if (tid > req->r_tid)
275 n = n->rb_right;
276 else
277 return req;
278 }
279 return NULL;
280}
281
282static struct ceph_osd_request *
283__lookup_request_ge(struct ceph_osd_client *osdc,
284 u64 tid)
285{
286 struct ceph_osd_request *req;
287 struct rb_node *n = osdc->requests.rb_node;
288
289 while (n) {
290 req = rb_entry(n, struct ceph_osd_request, r_node);
291 if (tid < req->r_tid) {
292 if (!n->rb_left)
293 return req;
294 n = n->rb_left;
295 } else if (tid > req->r_tid) {
296 n = n->rb_right;
297 } else {
298 return req;
299 }
300 }
301 return NULL;
302}
303
304
305/*
306 * If the osd connection drops, we need to resubmit all requests.
307 */
/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;

	if (!osd)
		return;		/* connection no longer tied to an osd */
	dout("osd_reset osd%d\n", osd->o_osd);
	osdc = osd->o_osdc;
	/* hold the map read-side while requeueing against current map */
	down_read(&osdc->map_sem);
	kick_requests(osdc, osd);
	up_read(&osdc->map_sem);
}
321
322/*
323 * Track open sessions with osds.
324 */
/*
 * Track open sessions with osds.
 * Allocate and initialize a ceph_osd session object (refcount 1, with
 * an initialized but unopened connection).  Returns NULL on OOM.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd;

	osd = kzalloc(sizeof(*osd), GFP_NOFS);
	if (!osd)
		return NULL;

	atomic_set(&osd->o_ref, 1);
	osd->o_osdc = osdc;
	INIT_LIST_HEAD(&osd->o_requests);
	INIT_LIST_HEAD(&osd->o_osd_lru);
	osd->o_incarnation = 1;

	ceph_con_init(osdc->client->msgr, &osd->o_con);
	osd->o_con.private = osd;
	osd->o_con.ops = &osd_con_ops;
	osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;

	INIT_LIST_HEAD(&osd->o_keepalive_item);
	return osd;
}
347
348static struct ceph_osd *get_osd(struct ceph_osd *osd)
349{
350 if (atomic_inc_not_zero(&osd->o_ref)) {
351 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
352 atomic_read(&osd->o_ref));
353 return osd;
354 } else {
355 dout("get_osd %p FAIL\n", osd);
356 return NULL;
357 }
358}
359
/* Drop a reference on an osd session; frees it on the last put. */
static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref))
		kfree(osd);
}
367
368/*
369 * remove an osd from our map
370 */
/*
 * remove an osd from our map
 * Caller must ensure the osd has no in-flight requests; drops the
 * reference held by the osds tree.
 */
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	dout("__remove_osd %p\n", osd);
	BUG_ON(!list_empty(&osd->o_requests));
	rb_erase(&osd->o_node, &osdc->osds);
	list_del_init(&osd->o_osd_lru);
	ceph_con_close(&osd->o_con);
	put_osd(osd);
}
380
/*
 * Park an idle osd session on the LRU with an expiry time; it will be
 * reaped by remove_old_osds() once its ttl passes.
 */
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
	dout("__move_osd_to_lru %p\n", osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ;
}
389
/* Take an osd session off the idle LRU, if it is on it. */
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	dout("__remove_osd_from_lru %p\n", osd);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
}
396
/*
 * Reap idle osd sessions whose LRU ttl has expired; with @remove_all,
 * tear down every session on the LRU regardless of age.  LRU order is
 * oldest-first, so we can stop at the first unexpired entry.
 */
static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (!remove_all && time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}
410
411/*
412 * reset osd connect
413 */
/*
 * reset osd connect
 * If the osd has no requests, just drop the session; otherwise close
 * and reopen its connection and bump the incarnation so stale replies
 * can be recognized.  Currently always returns 0.
 */
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
	int ret = 0;

	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
	if (list_empty(&osd->o_requests)) {
		__remove_osd(osdc, osd);
	} else {
		ceph_con_close(&osd->o_con);
		ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
		osd->o_incarnation++;
	}
	return ret;
}
428
429static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
430{
431 struct rb_node **p = &osdc->osds.rb_node;
432 struct rb_node *parent = NULL;
433 struct ceph_osd *osd = NULL;
434
435 while (*p) {
436 parent = *p;
437 osd = rb_entry(parent, struct ceph_osd, o_node);
438 if (new->o_osd < osd->o_osd)
439 p = &(*p)->rb_left;
440 else if (new->o_osd > osd->o_osd)
441 p = &(*p)->rb_right;
442 else
443 BUG();
444 }
445
446 rb_link_node(&new->o_node, parent, p);
447 rb_insert_color(&new->o_node, &osdc->osds);
448}
449
450static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
451{
452 struct ceph_osd *osd;
453 struct rb_node *n = osdc->osds.rb_node;
454
455 while (n) {
456 osd = rb_entry(n, struct ceph_osd, o_node);
457 if (o < osd->o_osd)
458 n = n->rb_left;
459 else if (o > osd->o_osd)
460 n = n->rb_right;
461 else
462 return osd;
463 }
464 return NULL;
465}
466
/* (re)arm the request timeout work; period is the keepalive timeout */
static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
{
	schedule_delayed_work(&osdc->timeout_work,
			osdc->client->mount_args->osd_keepalive_timeout * HZ);
}
472
/* stop the request timeout work (called when no requests remain) */
static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
{
	cancel_delayed_work(&osdc->timeout_work);
}
477
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 *
 * Takes a reference on @req on behalf of the request tree; it is
 * dropped by __unregister_request().
 */
static void register_request(struct ceph_osd_client *osdc,
			     struct ceph_osd_request *req)
{
	mutex_lock(&osdc->request_mutex);
	req->r_tid = ++osdc->last_tid;	/* tids are never reused */
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	INIT_LIST_HEAD(&req->r_req_lru_item);

	dout("register_request %p tid %lld\n", req, req->r_tid);
	__insert_request(osdc, req);
	ceph_osdc_get_request(req);	/* ref owned by osdc->requests */
	osdc->num_requests++;

	if (osdc->num_requests == 1) {
		dout(" first request, scheduling timeout\n");
		__schedule_osd_timeout(osdc);
	}
	mutex_unlock(&osdc->request_mutex);
}
501
/*
 * Remove a request from the tid tree and detach it from its osd,
 * revoking any still-queued outgoing message.  Drops the tree's
 * reference taken by register_request().
 *
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
				 struct ceph_osd_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &osdc->requests);
	osdc->num_requests--;

	if (req->r_osd) {
		/* make sure the original request isn't in flight. */
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);

		list_del_init(&req->r_osd_item);
		/* osd has no more work: park it on the idle LRU */
		if (list_empty(&req->r_osd->o_requests))
			__move_osd_to_lru(osdc, req->r_osd);
		req->r_osd = NULL;
	}

	/* drop the ref owned by the request tree */
	ceph_osdc_put_request(req);

	list_del_init(&req->r_req_lru_item);
	if (osdc->num_requests == 0) {
		dout(" no requests, canceling timeout\n");
		__cancel_osd_timeout(osdc);
	}
}
530
/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
	if (req->r_sent) {
		/* pull the message back out of the connection's send queue */
		ceph_con_revoke(&req->r_osd->o_con, req->r_request);
		req->r_sent = 0;
	}
	list_del_init(&req->r_req_lru_item);
}
542
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
		      struct ceph_osd_request *req)
{
	struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
	struct ceph_pg pgid;
	int o = -1;
	int err;

	dout("map_osds %p tid %lld\n", req, req->r_tid);
	/* recompute the object's placement group from the current osdmap */
	err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
				      &req->r_file_layout, osdc->osdmap);
	if (err)
		return err;
	pgid = reqhead->layout.ol_pgid;
	req->r_pgid = pgid;

	o = ceph_calc_pg_primary(osdc->osdmap, pgid);

	/* already on the right (current-incarnation) osd, or correctly unmapped? */
	if ((req->r_osd && req->r_osd->o_osd == o &&
	     req->r_sent >= req->r_osd->o_incarnation) ||
	    (req->r_osd == NULL && o == -1))
		return 0;  /* no change */

	dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n",
	     req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
	     req->r_osd ? req->r_osd->o_osd : -1);

	/* detach from the old osd, revoking any queued message */
	if (req->r_osd) {
		__cancel_request(req);
		list_del_init(&req->r_osd_item);
		req->r_osd = NULL;
	}

	req->r_osd = __lookup_osd(osdc, o);
	if (!req->r_osd && o >= 0) {
		/* no session with this osd yet: create and connect one */
		err = -ENOMEM;
		req->r_osd = create_osd(osdc);
		if (!req->r_osd)
			goto out;

		dout("map_osds osd %p is osd%d\n", req->r_osd, o);
		req->r_osd->o_osd = o;
		req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
		__insert_osd(osdc, req->r_osd);

		ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
	}

	if (req->r_osd) {
		/* osd has work again; keep it off the idle LRU */
		__remove_osd_from_lru(req->r_osd);
		list_add(&req->r_osd_item, &req->r_osd->o_requests);
	}
	err = 1;   /* osd changed */

out:
	return err;
}
609
610/*
611 * caller should hold map_sem (for read) and request_mutex
612 */
613static int __send_request(struct ceph_osd_client *osdc,
614 struct ceph_osd_request *req)
615{
616 struct ceph_osd_request_head *reqhead;
617 int err;
618
619 err = __map_osds(osdc, req);
620 if (err < 0)
621 return err;
622 if (req->r_osd == NULL) {
623 dout("send_request %p no up osds in pg\n", req);
624 ceph_monc_request_next_osdmap(&osdc->client->monc);
625 return 0;
626 }
627
628 dout("send_request %p tid %llu to osd%d flags %d\n",
629 req, req->r_tid, req->r_osd->o_osd, req->r_flags);
630
631 reqhead = req->r_request->front.iov_base;
632 reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
633 reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
634 reqhead->reassert_version = req->r_reassert_version;
635
636 req->r_sent_stamp = jiffies;
637 list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
638
639 ceph_msg_get(req->r_request); /* send consumes a ref */
640 ceph_con_send(&req->r_osd->o_con, req->r_request);
641 req->r_sent = req->r_osd->o_incarnation;
642 return 0;
643}
644
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests has been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);
	struct ceph_osd_request *req, *last_req = NULL;
	struct ceph_osd *osd;
	unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
	unsigned long keepalive =
		osdc->client->mount_args->osd_keepalive_timeout * HZ;
	unsigned long last_sent = 0;
	struct rb_node *p;
	struct list_head slow_osds;

	dout("timeout\n");
	down_read(&osdc->map_sem);

	ceph_monc_request_next_osdmap(&osdc->client->monc);

	mutex_lock(&osdc->request_mutex);
	/* first, retry any requests whose earlier send attempt failed */
	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);

		if (req->r_resend) {
			int err;

			dout("osdc resending prev failed %lld\n", req->r_tid);
			err = __send_request(osdc, req);
			if (err)
				dout("osdc failed again on %lld\n", req->r_tid);
			else
				req->r_resend = false;
			continue;
		}
	}

	/*
	 * reset osds that appear to be _really_ unresponsive.  this
	 * is a failsafe measure.. we really shouldn't be getting to
	 * this point if the system is working properly.  the monitors
	 * should mark the osd as failed and we should find out about
	 * it from an updated osd map.
	 */
	while (!list_empty(&osdc->req_lru)) {
		req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
				 r_req_lru_item);

		/* the LRU is ordered by send time; stop at the first live one */
		if (time_before(jiffies, req->r_sent_stamp + timeout))
			break;

		/* guard against spinning on the same stuck entry forever */
		BUG_ON(req == last_req && req->r_sent_stamp == last_sent);
		last_req = req;
		last_sent = req->r_sent_stamp;

		osd = req->r_osd;
		BUG_ON(!osd);
		pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
			   req->r_tid, osd->o_osd);
		__kick_requests(osdc, osd);
	}

	/*
	 * ping osds that are a bit slow.  this ensures that if there
	 * is a break in the TCP connection we will notice, and reopen
	 * a connection with that osd (from the fault callback).
	 */
	INIT_LIST_HEAD(&slow_osds);
	list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
		if (time_before(jiffies, req->r_sent_stamp + keepalive))
			break;

		osd = req->r_osd;
		BUG_ON(!osd);
		dout(" tid %llu is slow, will send keepalive on osd%d\n",
		     req->r_tid, osd->o_osd);
		list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
	/* send one keepalive per slow osd, not one per slow request */
	while (!list_empty(&slow_osds)) {
		osd = list_entry(slow_osds.next, struct ceph_osd,
				 o_keepalive_item);
		list_del_init(&osd->o_keepalive_item);
		ceph_con_keepalive(&osd->o_con);
	}

	__schedule_osd_timeout(osdc);
	mutex_unlock(&osdc->request_mutex);

	up_read(&osdc->map_sem);
}
742
743static void handle_osds_timeout(struct work_struct *work)
744{
745 struct ceph_osd_client *osdc =
746 container_of(work, struct ceph_osd_client,
747 osds_timeout_work.work);
748 unsigned long delay =
749 osdc->client->mount_args->osd_idle_ttl * HZ >> 2;
750
751 dout("osds timeout\n");
752 down_read(&osdc->map_sem);
753 remove_old_osds(osdc, 0);
754 up_read(&osdc->map_sem);
755
756 schedule_delayed_work(&osdc->osds_timeout_work,
757 round_jiffies_relative(delay));
758}
759
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
			 struct ceph_connection *con)
{
	struct ceph_osd_reply_head *rhead = msg->front.iov_base;
	struct ceph_osd_request *req;
	u64 tid;
	int numops, object_len, flags;

	tid = le64_to_cpu(msg->hdr.tid);
	/* sanity-check the reply framing before trusting any field */
	if (msg->front.iov_len < sizeof(*rhead))
		goto bad;
	numops = le32_to_cpu(rhead->num_ops);
	object_len = le32_to_cpu(rhead->object_len);
	if (msg->front.iov_len != sizeof(*rhead) + object_len +
	    numops * sizeof(struct ceph_osd_op))
		goto bad;
	dout("handle_reply %p tid %llu\n", msg, tid);

	/* lookup */
	mutex_lock(&osdc->request_mutex);
	req = __lookup_request(osdc, tid);
	if (req == NULL) {
		/* e.g. request already completed and was unregistered */
		dout("handle_reply tid %llu dne\n", tid);
		mutex_unlock(&osdc->request_mutex);
		return;
	}
	ceph_osdc_get_request(req);
	flags = le32_to_cpu(rhead->flags);

	/*
	 * if this connection filled our message, drop our reference now, to
	 * avoid a (safe but slower) revoke later.
	 */
	if (req->r_con_filling_msg == con && req->r_reply == msg) {
		dout(" dropping con_filling_msg ref %p\n", con);
		req->r_con_filling_msg = NULL;
		ceph_con_put(con);
	}

	if (!req->r_got_reply) {
		/* first reply for this request: record the result */
		unsigned bytes;

		req->r_result = le32_to_cpu(rhead->result);
		bytes = le32_to_cpu(msg->hdr.data_len);
		dout("handle_reply result %d bytes %d\n", req->r_result,
		     bytes);
		if (req->r_result == 0)
			req->r_result = bytes;	/* success: report bytes transferred */

		/* in case this is a write and we need to replay, */
		req->r_reassert_version = rhead->reassert_version;

		req->r_got_reply = 1;
	} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
		/* a second un-safe ack carries no new information */
		dout("handle_reply tid %llu dup ack\n", tid);
		mutex_unlock(&osdc->request_mutex);
		goto done;
	}

	dout("handle_reply tid %llu flags %d\n", tid, flags);

	/* either this is a read, or we got the safe response */
	if ((flags & CEPH_OSD_FLAG_ONDISK) ||
	    ((flags & CEPH_OSD_FLAG_WRITE) == 0))
		__unregister_request(osdc, req);

	mutex_unlock(&osdc->request_mutex);

	if (req->r_callback)
		req->r_callback(req, msg);
	else
		complete(&req->r_completion);

	if (flags & CEPH_OSD_FLAG_ONDISK) {
		if (req->r_safe_callback)
			req->r_safe_callback(req, msg);
		complete(&req->r_safe_completion);  /* fsync waiter */
	}

done:
	ceph_osdc_put_request(req);
	return;

bad:
	pr_err("corrupt osd_op_reply got %d %d expected %d\n",
	       (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
	       (int)sizeof(*rhead));
	ceph_msg_dump(msg);
}
853
854
/*
 * Resend pending requests: either everything attached to @kickosd, or
 * (if @kickosd is NULL, e.g. after a map change) any request whose
 * osd mapping has changed.  Returns the number of requests that map
 * to no usable osd, so the caller can request a newer map.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __kick_requests(struct ceph_osd_client *osdc,
			   struct ceph_osd *kickosd)
{
	struct ceph_osd_request *req;
	struct rb_node *p, *n;
	int needmap = 0;
	int err;

	dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
	if (kickosd) {
		__reset_osd(osdc, kickosd);
	} else {
		/* no specific osd: reset any osd that is down or has moved */
		for (p = rb_first(&osdc->osds); p; p = n) {
			struct ceph_osd *osd =
				rb_entry(p, struct ceph_osd, o_node);

			/* __reset_osd() may erase p from the tree; save next */
			n = rb_next(p);
			if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
			    memcmp(&osd->o_con.peer_addr,
				   ceph_osd_addr(osdc->osdmap,
						 osd->o_osd),
				   sizeof(struct ceph_entity_addr)) != 0)
				__reset_osd(osdc, osd);
		}
	}

	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_osd_request, r_node);

		if (req->r_resend) {
			dout(" r_resend set on tid %llu\n", req->r_tid);
			__cancel_request(req);
			goto kick;
		}
		if (req->r_osd && kickosd == req->r_osd) {
			__cancel_request(req);
			goto kick;
		}

		err = __map_osds(osdc, req);
		if (err == 0)
			continue;  /* no change */
		if (err < 0) {
			/*
			 * FIXME: really, we should set the request
			 * error and fail if this isn't a 'nofail'
			 * request, but that's a fair bit more
			 * complicated to do.  So retry!
			 */
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
			continue;
		}
		if (req->r_osd == NULL) {
			dout("tid %llu maps to no valid osd\n", req->r_tid);
			needmap++;  /* request a newer map */
			continue;
		}

kick:
		dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
		     req->r_osd->o_osd);
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
		err = __send_request(osdc, req);
		if (err) {
			dout(" setting r_resend on %llu\n", req->r_tid);
			req->r_resend = true;
		}
	}

	return needmap;
}
927
928/*
929 * Resubmit osd requests whose osd or osd address has changed. Request
930 * a new osd map if osds are down, or we are otherwise unable to determine
931 * how to direct a request.
932 *
933 * Close connections to down osds.
934 *
935 * If @who is specified, resubmit requests for that specific osd.
936 *
937 * Caller should hold map_sem for read and request_mutex.
938 */
939static void kick_requests(struct ceph_osd_client *osdc,
940 struct ceph_osd *kickosd)
941{
942 int needmap;
943
944 mutex_lock(&osdc->request_mutex);
945 needmap = __kick_requests(osdc, kickosd);
946 mutex_unlock(&osdc->request_mutex);
947
948 if (needmap) {
949 dout("%d requests for down osds, need new map\n", needmap);
950 ceph_monc_request_next_osdmap(&osdc->client->monc);
951 }
952
953}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
	void *p, *end, *next;
	u32 nr_maps, maplen;
	u32 epoch;
	struct ceph_osdmap *newmap = NULL, *oldmap;
	int err;
	struct ceph_fsid fsid;

	dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	/* verify fsid */
	ceph_decode_need(&p, end, sizeof(fsid), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		return;

	down_write(&osdc->map_sem);

	/* incremental maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d inc maps\n", nr_maps);
	while (nr_maps > 0) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		next = p + maplen;
		/* an incremental is only usable if it advances us by one epoch */
		if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			newmap = osdmap_apply_incremental(&p, next,
							  osdc->osdmap,
							  osdc->client->msgr);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			if (newmap != osdc->osdmap) {
				ceph_osdmap_destroy(osdc->osdmap);
				osdc->osdmap = newmap;
			}
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p = next;
		nr_maps--;
	}
	if (newmap)
		goto done;	/* incrementals got us current; skip full maps */

	/* full maps */
	ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			/* only the last (newest) full map matters */
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			newmap = osdmap_decode(&p, p+maplen);
			if (IS_ERR(newmap)) {
				err = PTR_ERR(newmap);
				goto bad;
			}
			BUG_ON(!newmap);
			oldmap = osdc->osdmap;
			osdc->osdmap = newmap;
			if (oldmap)
				ceph_osdmap_destroy(oldmap);
		}
		p += maplen;
		nr_maps--;
	}

done:
	/* drop to a read lock; kick_requests() needs map_sem held for read */
	downgrade_write(&osdc->map_sem);
	ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
	if (newmap)
		kick_requests(osdc, NULL);	/* remap/resend affected requests */
	up_read(&osdc->map_sem);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->map_sem);
	return;
}
1062
1063
/*
 * A read request prepares specific pages that data is to be read into.
 * When a message is being read off the wire, we call prepare_pages to
 * find those pages.
 *  0 = success, -1 failure.
 */
static int __prepare_pages(struct ceph_connection *con,
			   struct ceph_msg_header *hdr,
			   struct ceph_osd_request *req,
			   u64 tid,
			   struct ceph_msg *m)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc;
	int ret = -1;
	int data_len = le32_to_cpu(hdr->data_len);
	unsigned data_off = le16_to_cpu(hdr->data_off);

	/* pages needed to hold data_len bytes at the page offset of data_off */
	int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);

	if (!osd)
		return -1;

	osdc = osd->o_osdc;

	dout("__prepare_pages on msg %p tid %llu, has %d pages, want %d\n", m,
	     tid, req->r_num_pages, want);
	if (unlikely(req->r_num_pages < want))
		goto out;
	m->pages = req->r_pages;	/* read data directly into request pages */
	m->nr_pages = req->r_num_pages;
	ret = 0; /* success */
out:
	/*
	 * NOTE(review): this BUG_ON also fires on the short-page goto above
	 * (ret stays -1), so a reply larger than the prepared pages appears
	 * to be treated as fatal — confirm that is intentional.
	 */
	BUG_ON(ret < 0 || m->nr_pages < want);

	return ret;
}
1101
/*
 * Register request, send initial attempt.
 *
 * If @nofail, a failed send is converted into a deferred resend
 * (handled later by handle_timeout()); otherwise the request is
 * unregistered and the error returned.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	int rc = 0;

	/* hang the data pages off the outgoing message */
	req->r_request->pages = req->r_pages;
	req->r_request->nr_pages = req->r_num_pages;

	register_request(osdc, req);

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);
	/*
	 * a racing kick_requests() may have sent the message for us
	 * while we dropped request_mutex above, so only send now if
	 * the request still hasn't been touched yet.
	 */
	if (req->r_sent == 0) {
		rc = __send_request(osdc, req);
		if (rc) {
			if (nofail) {
				dout("osdc_start_request failed send, "
				     " marking %lld\n", req->r_tid);
				req->r_resend = true;	/* retried by handle_timeout() */
				rc = 0;
			} else {
				__unregister_request(osdc, req);
			}
		}
	}
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
1140
1141/*
1142 * wait for a request to complete
1143 */
1144int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
1145 struct ceph_osd_request *req)
1146{
1147 int rc;
1148
1149 rc = wait_for_completion_interruptible(&req->r_completion);
1150 if (rc < 0) {
1151 mutex_lock(&osdc->request_mutex);
1152 __cancel_request(req);
1153 __unregister_request(osdc, req);
1154 mutex_unlock(&osdc->request_mutex);
1155 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
1156 return rc;
1157 }
1158
1159 dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
1160 return req->r_result;
1161}
1162
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	u64 last_tid, next_tid = 0;

	mutex_lock(&osdc->request_mutex);
	last_tid = osdc->last_tid;	/* only wait for tids issued before we started */
	while (1) {
		req = __lookup_request_ge(osdc, next_tid);
		if (!req)
			break;
		if (req->r_tid > last_tid)
			break;

		next_tid = req->r_tid + 1;
		/* reads have nothing to flush */
		if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
			continue;

		/* hold a ref so req survives while we sleep unlocked */
		ceph_osdc_get_request(req);
		mutex_unlock(&osdc->request_mutex);
		dout("sync waiting on tid %llu (last is %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		mutex_lock(&osdc->request_mutex);
		ceph_osdc_put_request(req);
	}
	mutex_unlock(&osdc->request_mutex);
	dout("sync done (thru tid %llu)\n", last_tid);
}
1195
/*
 * init, shutdown
 */
/*
 * Initialize an osd client: zero its state, start the idle-osd reaper
 * work, and preallocate the request mempool and message pools.
 * Returns 0, or a negative error with all partially-created pools
 * torn down.
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	osdc->osdmap = NULL;	/* set later by ceph_osdc_handle_map() */
	init_rwsem(&osdc->map_sem);
	init_completion(&osdc->map_waiters);
	osdc->last_requested_map = 0;
	mutex_init(&osdc->request_mutex);
	osdc->last_tid = 0;
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	osdc->requests = RB_ROOT;
	INIT_LIST_HEAD(&osdc->req_lru);
	osdc->num_requests = 0;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	/* start reaping idle osds; handle_osds_timeout() re-arms itself */
	schedule_delayed_work(&osdc->osds_timeout_work,
	   round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ));

	err = -ENOMEM;
	osdc->req_mempool = mempool_create_kmalloc_pool(10,
					sizeof(struct ceph_osd_request));
	if (!osdc->req_mempool)
		goto out;

	err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true);
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply,
				OSD_OPREPLY_FRONT_LEN, 10, true);
	if (err < 0)
		goto out_msgpool;
	return 0;

out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out:
	return err;
}
1244
1245void ceph_osdc_stop(struct ceph_osd_client *osdc)
1246{
1247 cancel_delayed_work_sync(&osdc->timeout_work);
1248 cancel_delayed_work_sync(&osdc->osds_timeout_work);
1249 if (osdc->osdmap) {
1250 ceph_osdmap_destroy(osdc->osdmap);
1251 osdc->osdmap = NULL;
1252 }
1253 remove_old_osds(osdc, 1);
1254 mempool_destroy(osdc->req_mempool);
1255 ceph_msgpool_destroy(&osdc->msgpool_op);
1256 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
1257}
1258
1259/*
1260 * Read some contiguous pages. If we cross a stripe boundary, shorten
1261 * *plen. Return number of bytes read, or error.
1262 */
1263int ceph_osdc_readpages(struct ceph_osd_client *osdc,
1264 struct ceph_vino vino, struct ceph_file_layout *layout,
1265 u64 off, u64 *plen,
1266 u32 truncate_seq, u64 truncate_size,
1267 struct page **pages, int num_pages)
1268{
1269 struct ceph_osd_request *req;
1270 int rc = 0;
1271
1272 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
1273 vino.snap, off, *plen);
1274 req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
1275 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
1276 NULL, 0, truncate_seq, truncate_size, NULL,
1277 false, 1);
1278 if (IS_ERR(req))
1279 return PTR_ERR(req);
1280
1281 /* it may be a short read due to an object boundary */
1282 req->r_pages = pages;
1283 num_pages = calc_pages_for(off, *plen);
1284 req->r_num_pages = num_pages;
1285
1286 dout("readpages final extent is %llu~%llu (%d pages)\n",
1287 off, *plen, req->r_num_pages);
1288
1289 rc = ceph_osdc_start_request(osdc, req, false);
1290 if (!rc)
1291 rc = ceph_osdc_wait_request(osdc, req);
1292
1293 ceph_osdc_put_request(req);
1294 dout("readpages result %d\n", rc);
1295 return rc;
1296}
1297
1298/*
1299 * do a synchronous write on N pages
1300 */
1301int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
1302 struct ceph_file_layout *layout,
1303 struct ceph_snap_context *snapc,
1304 u64 off, u64 len,
1305 u32 truncate_seq, u64 truncate_size,
1306 struct timespec *mtime,
1307 struct page **pages, int num_pages,
1308 int flags, int do_sync, bool nofail)
1309{
1310 struct ceph_osd_request *req;
1311 int rc = 0;
1312
1313 BUG_ON(vino.snap != CEPH_NOSNAP);
1314 req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
1315 CEPH_OSD_OP_WRITE,
1316 flags | CEPH_OSD_FLAG_ONDISK |
1317 CEPH_OSD_FLAG_WRITE,
1318 snapc, do_sync,
1319 truncate_seq, truncate_size, mtime,
1320 nofail, 1);
1321 if (IS_ERR(req))
1322 return PTR_ERR(req);
1323
1324 /* it may be a short write due to an object boundary */
1325 req->r_pages = pages;
1326 req->r_num_pages = calc_pages_for(off, len);
1327 dout("writepages %llu~%llu (%d pages)\n", off, len,
1328 req->r_num_pages);
1329
1330 rc = ceph_osdc_start_request(osdc, req, nofail);
1331 if (!rc)
1332 rc = ceph_osdc_wait_request(osdc, req);
1333
1334 ceph_osdc_put_request(req);
1335 if (rc == 0)
1336 rc = len;
1337 dout("writepages result %d\n", rc);
1338 return rc;
1339}
1340
1341/*
1342 * handle incoming message
1343 */
1344static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1345{
1346 struct ceph_osd *osd = con->private;
1347 struct ceph_osd_client *osdc;
1348 int type = le16_to_cpu(msg->hdr.type);
1349
1350 if (!osd)
1351 return;
1352 osdc = osd->o_osdc;
1353
1354 switch (type) {
1355 case CEPH_MSG_OSD_MAP:
1356 ceph_osdc_handle_map(osdc, msg);
1357 break;
1358 case CEPH_MSG_OSD_OPREPLY:
1359 handle_reply(osdc, msg, con);
1360 break;
1361
1362 default:
1363 pr_err("received unknown message type %d %s\n", type,
1364 ceph_msg_type_name(type));
1365 }
1366 ceph_msg_put(msg);
1367}
1368
1369/*
1370 * lookup and return message for incoming reply
1371 */
1372static struct ceph_msg *get_reply(struct ceph_connection *con,
1373 struct ceph_msg_header *hdr,
1374 int *skip)
1375{
1376 struct ceph_osd *osd = con->private;
1377 struct ceph_osd_client *osdc = osd->o_osdc;
1378 struct ceph_msg *m;
1379 struct ceph_osd_request *req;
1380 int front = le32_to_cpu(hdr->front_len);
1381 int data_len = le32_to_cpu(hdr->data_len);
1382 u64 tid;
1383 int err;
1384
1385 tid = le64_to_cpu(hdr->tid);
1386 mutex_lock(&osdc->request_mutex);
1387 req = __lookup_request(osdc, tid);
1388 if (!req) {
1389 *skip = 1;
1390 m = NULL;
1391 pr_info("get_reply unknown tid %llu from osd%d\n", tid,
1392 osd->o_osd);
1393 goto out;
1394 }
1395
1396 if (req->r_con_filling_msg) {
1397 dout("get_reply revoking msg %p from old con %p\n",
1398 req->r_reply, req->r_con_filling_msg);
1399 ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
1400 ceph_con_put(req->r_con_filling_msg);
1401 }
1402
1403 if (front > req->r_reply->front.iov_len) {
1404 pr_warning("get_reply front %d > preallocated %d\n",
1405 front, (int)req->r_reply->front.iov_len);
1406 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, 0, 0, NULL);
1407 if (IS_ERR(m))
1408 goto out;
1409 ceph_msg_put(req->r_reply);
1410 req->r_reply = m;
1411 }
1412 m = ceph_msg_get(req->r_reply);
1413
1414 if (data_len > 0) {
1415 err = __prepare_pages(con, hdr, req, tid, m);
1416 if (err < 0) {
1417 *skip = 1;
1418 ceph_msg_put(m);
1419 m = ERR_PTR(err);
1420 }
1421 }
1422 *skip = 0;
1423 req->r_con_filling_msg = ceph_con_get(con);
1424 dout("get_reply tid %lld %p\n", tid, m);
1425
1426out:
1427 mutex_unlock(&osdc->request_mutex);
1428 return m;
1429
1430}
1431
1432static struct ceph_msg *alloc_msg(struct ceph_connection *con,
1433 struct ceph_msg_header *hdr,
1434 int *skip)
1435{
1436 struct ceph_osd *osd = con->private;
1437 int type = le16_to_cpu(hdr->type);
1438 int front = le32_to_cpu(hdr->front_len);
1439
1440 switch (type) {
1441 case CEPH_MSG_OSD_MAP:
1442 return ceph_msg_new(type, front, 0, 0, NULL);
1443 case CEPH_MSG_OSD_OPREPLY:
1444 return get_reply(con, hdr, skip);
1445 default:
1446 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
1447 osd->o_osd);
1448 *skip = 1;
1449 return NULL;
1450 }
1451}
1452
1453/*
1454 * Wrappers to refcount containing ceph_osd struct
1455 */
1456static struct ceph_connection *get_osd_con(struct ceph_connection *con)
1457{
1458 struct ceph_osd *osd = con->private;
1459 if (get_osd(osd))
1460 return con;
1461 return NULL;
1462}
1463
/* drop the osd ref taken by get_osd_con() */
static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
1469
/*
 * authentication
 */
/*
 * Build (or rebuild, if @force_new) the authorizer for this osd
 * session and hand the messenger pointers to the ticket buffers.
 */
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	int ret = 0;

	/* throw away the cached authorizer if the caller wants a fresh one */
	if (force_new && o->o_authorizer) {
		ac->ops->destroy_authorizer(ac, o->o_authorizer);
		o->o_authorizer = NULL;
	}
	if (o->o_authorizer == NULL) {
		ret = ac->ops->create_authorizer(
			ac, CEPH_ENTITY_TYPE_OSD,
			&o->o_authorizer,
			&o->o_authorizer_buf,
			&o->o_authorizer_buf_len,
			&o->o_authorizer_reply_buf,
			&o->o_authorizer_reply_buf_len);
		if (ret)
			return ret;
	}

	/* point the messenger at our (possibly cached) authorizer buffers */
	*proto = ac->protocol;
	*buf = o->o_authorizer_buf;
	*len = o->o_authorizer_buf_len;
	*reply_buf = o->o_authorizer_reply_buf;
	*reply_len = o->o_authorizer_reply_buf_len;
	return 0;
}
1505
1506
1507static int verify_authorizer_reply(struct ceph_connection *con, int len)
1508{
1509 struct ceph_osd *o = con->private;
1510 struct ceph_osd_client *osdc = o->o_osdc;
1511 struct ceph_auth_client *ac = osdc->client->monc.auth;
1512
1513 return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
1514}
1515
1516static int invalidate_authorizer(struct ceph_connection *con)
1517{
1518 struct ceph_osd *o = con->private;
1519 struct ceph_osd_client *osdc = o->o_osdc;
1520 struct ceph_auth_client *ac = osdc->client->monc.auth;
1521
1522 if (ac->ops->invalidate_authorizer)
1523 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
1524
1525 return ceph_monc_validate_auth(&osdc->client->monc);
1526}
1527
1528const static struct ceph_connection_operations osd_con_ops = {
1529 .get = get_osd_con,
1530 .put = put_osd_con,
1531 .dispatch = dispatch,
1532 .get_authorizer = get_authorizer,
1533 .verify_authorizer_reply = verify_authorizer_reply,
1534 .invalidate_authorizer = invalidate_authorizer,
1535 .alloc_msg = alloc_msg,
1536 .fault = osd_reset,
1537};
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
new file mode 100644
index 000000000000..1b1a3ca43afc
--- /dev/null
+++ b/fs/ceph/osd_client.h
@@ -0,0 +1,166 @@
1#ifndef _FS_CEPH_OSD_CLIENT_H
2#define _FS_CEPH_OSD_CLIENT_H
3
4#include <linux/completion.h>
5#include <linux/kref.h>
6#include <linux/mempool.h>
7#include <linux/rbtree.h>
8
9#include "types.h"
10#include "osdmap.h"
11#include "messenger.h"
12
13struct ceph_msg;
14struct ceph_snap_context;
15struct ceph_osd_request;
16struct ceph_osd_client;
17struct ceph_authorizer;
18
19/*
20 * completion callback for async writepages
21 */
22typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
23 struct ceph_msg *);
24
25/* a given osd we're communicating with */
26struct ceph_osd {
27 atomic_t o_ref;
28 struct ceph_osd_client *o_osdc;
29 int o_osd;
30 int o_incarnation;
31 struct rb_node o_node;
32 struct ceph_connection o_con;
33 struct list_head o_requests;
34 struct list_head o_osd_lru;
35 struct ceph_authorizer *o_authorizer;
36 void *o_authorizer_buf, *o_authorizer_reply_buf;
37 size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
38 unsigned long lru_ttl;
39 int o_marked_for_keepalive;
40 struct list_head o_keepalive_item;
41};
42
43/* an in-flight request */
44struct ceph_osd_request {
45 u64 r_tid; /* unique for this client */
46 struct rb_node r_node;
47 struct list_head r_req_lru_item;
48 struct list_head r_osd_item;
49 struct ceph_osd *r_osd;
50 struct ceph_pg r_pgid;
51
52 struct ceph_connection *r_con_filling_msg;
53
54 struct ceph_msg *r_request, *r_reply;
55 int r_result;
56 int r_flags; /* any additional flags for the osd */
57 u32 r_sent; /* >0 if r_request is sending/sent */
58 int r_got_reply;
59
60 struct ceph_osd_client *r_osdc;
61 struct kref r_kref;
62 bool r_mempool;
63 struct completion r_completion, r_safe_completion;
64 ceph_osdc_callback_t r_callback, r_safe_callback;
65 struct ceph_eversion r_reassert_version;
66 struct list_head r_unsafe_item;
67
68 struct inode *r_inode; /* for use by callbacks */
69 struct writeback_control *r_wbc; /* ditto */
70
71 char r_oid[40]; /* object name */
72 int r_oid_len;
73 unsigned long r_sent_stamp;
74 bool r_resend; /* msg send failed, needs retry */
75
76 struct ceph_file_layout r_file_layout;
77 struct ceph_snap_context *r_snapc; /* snap context for writes */
78 unsigned r_num_pages; /* size of page array (follows) */
79 struct page **r_pages; /* pages for data payload */
80 int r_pages_from_pool;
81 int r_own_pages; /* if true, i own page list */
82};
83
84struct ceph_osd_client {
85 struct ceph_client *client;
86
87 struct ceph_osdmap *osdmap; /* current map */
88 struct rw_semaphore map_sem;
89 struct completion map_waiters;
90 u64 last_requested_map;
91
92 struct mutex request_mutex;
93 struct rb_root osds; /* osds */
94 struct list_head osd_lru; /* idle osds */
95 u64 timeout_tid; /* tid of timeout triggering rq */
96 u64 last_tid; /* tid of last request */
97 struct rb_root requests; /* pending requests */
98 struct list_head req_lru; /* pending requests lru */
99 int num_requests;
100 struct delayed_work timeout_work;
101 struct delayed_work osds_timeout_work;
102#ifdef CONFIG_DEBUG_FS
103 struct dentry *debugfs_file;
104#endif
105
106 mempool_t *req_mempool;
107
108 struct ceph_msgpool msgpool_op;
109 struct ceph_msgpool msgpool_op_reply;
110};
111
112extern int ceph_osdc_init(struct ceph_osd_client *osdc,
113 struct ceph_client *client);
114extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
115
116extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
117 struct ceph_msg *msg);
118extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
119 struct ceph_msg *msg);
120
121extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
122 struct ceph_file_layout *layout,
123 struct ceph_vino vino,
124 u64 offset, u64 *len, int op, int flags,
125 struct ceph_snap_context *snapc,
126 int do_sync, u32 truncate_seq,
127 u64 truncate_size,
128 struct timespec *mtime,
129 bool use_mempool, int num_reply);
130
131static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
132{
133 kref_get(&req->r_kref);
134}
135extern void ceph_osdc_release_request(struct kref *kref);
136static inline void ceph_osdc_put_request(struct ceph_osd_request *req)
137{
138 kref_put(&req->r_kref, ceph_osdc_release_request);
139}
140
141extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
142 struct ceph_osd_request *req,
143 bool nofail);
144extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
145 struct ceph_osd_request *req);
146extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
147
148extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
149 struct ceph_vino vino,
150 struct ceph_file_layout *layout,
151 u64 off, u64 *plen,
152 u32 truncate_seq, u64 truncate_size,
153 struct page **pages, int nr_pages);
154
155extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
156 struct ceph_vino vino,
157 struct ceph_file_layout *layout,
158 struct ceph_snap_context *sc,
159 u64 off, u64 len,
160 u32 truncate_seq, u64 truncate_size,
161 struct timespec *mtime,
162 struct page **pages, int nr_pages,
163 int flags, int do_sync, bool nofail);
164
165#endif
166
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
new file mode 100644
index 000000000000..b83f2692b835
--- /dev/null
+++ b/fs/ceph/osdmap.c
@@ -0,0 +1,1019 @@
1
2#include <asm/div64.h>
3
4#include "super.h"
5#include "osdmap.h"
6#include "crush/hash.h"
7#include "crush/mapper.h"
8#include "decode.h"
9#include "ceph_debug.h"
10
11char *ceph_osdmap_state_str(char *str, int len, int state)
12{
13 int flag = 0;
14
15 if (!len)
16 goto done;
17
18 *str = '\0';
19 if (state) {
20 if (state & CEPH_OSD_EXISTS) {
21 snprintf(str, len, "exists");
22 flag = 1;
23 }
24 if (state & CEPH_OSD_UP) {
25 snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
26 "up");
27 flag = 1;
28 }
29 } else {
30 snprintf(str, len, "doesn't exist");
31 }
32done:
33 return str;
34}
35
36/* maps */
37
/*
 * Number of significant bits in t: 0 -> 0, 1 -> 1, 2..3 -> 2, 4..7 -> 3,
 * and so on (i.e. floor(log2(t)) + 1 for t > 0).
 */
static int calc_bits_of(unsigned t)
{
	int bits;

	for (bits = 0; t != 0; bits++)
		t >>= 1;
	return bits;
}
47
48/*
49 * the foo_mask is the smallest value 2^n-1 that is >= foo.
50 */
51static void calc_pg_masks(struct ceph_pg_pool_info *pi)
52{
53 pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
54 pi->pgp_num_mask =
55 (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
56 pi->lpg_num_mask =
57 (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
58 pi->lpgp_num_mask =
59 (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
60}
61
62/*
63 * decode crush map
64 */
65static int crush_decode_uniform_bucket(void **p, void *end,
66 struct crush_bucket_uniform *b)
67{
68 dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
69 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
70 b->item_weight = ceph_decode_32(p);
71 return 0;
72bad:
73 return -EINVAL;
74}
75
76static int crush_decode_list_bucket(void **p, void *end,
77 struct crush_bucket_list *b)
78{
79 int j;
80 dout("crush_decode_list_bucket %p to %p\n", *p, end);
81 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
82 if (b->item_weights == NULL)
83 return -ENOMEM;
84 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
85 if (b->sum_weights == NULL)
86 return -ENOMEM;
87 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
88 for (j = 0; j < b->h.size; j++) {
89 b->item_weights[j] = ceph_decode_32(p);
90 b->sum_weights[j] = ceph_decode_32(p);
91 }
92 return 0;
93bad:
94 return -EINVAL;
95}
96
97static int crush_decode_tree_bucket(void **p, void *end,
98 struct crush_bucket_tree *b)
99{
100 int j;
101 dout("crush_decode_tree_bucket %p to %p\n", *p, end);
102 ceph_decode_32_safe(p, end, b->num_nodes, bad);
103 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
104 if (b->node_weights == NULL)
105 return -ENOMEM;
106 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
107 for (j = 0; j < b->num_nodes; j++)
108 b->node_weights[j] = ceph_decode_32(p);
109 return 0;
110bad:
111 return -EINVAL;
112}
113
114static int crush_decode_straw_bucket(void **p, void *end,
115 struct crush_bucket_straw *b)
116{
117 int j;
118 dout("crush_decode_straw_bucket %p to %p\n", *p, end);
119 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
120 if (b->item_weights == NULL)
121 return -ENOMEM;
122 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
123 if (b->straws == NULL)
124 return -ENOMEM;
125 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
126 for (j = 0; j < b->h.size; j++) {
127 b->item_weights[j] = ceph_decode_32(p);
128 b->straws[j] = ceph_decode_32(p);
129 }
130 return 0;
131bad:
132 return -EINVAL;
133}
134
135static struct crush_map *crush_decode(void *pbyval, void *end)
136{
137 struct crush_map *c;
138 int err = -EINVAL;
139 int i, j;
140 void **p = &pbyval;
141 void *start = pbyval;
142 u32 magic;
143
144 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
145
146 c = kzalloc(sizeof(*c), GFP_NOFS);
147 if (c == NULL)
148 return ERR_PTR(-ENOMEM);
149
150 ceph_decode_need(p, end, 4*sizeof(u32), bad);
151 magic = ceph_decode_32(p);
152 if (magic != CRUSH_MAGIC) {
153 pr_err("crush_decode magic %x != current %x\n",
154 (unsigned)magic, (unsigned)CRUSH_MAGIC);
155 goto bad;
156 }
157 c->max_buckets = ceph_decode_32(p);
158 c->max_rules = ceph_decode_32(p);
159 c->max_devices = ceph_decode_32(p);
160
161 c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
162 if (c->device_parents == NULL)
163 goto badmem;
164 c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
165 if (c->bucket_parents == NULL)
166 goto badmem;
167
168 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
169 if (c->buckets == NULL)
170 goto badmem;
171 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
172 if (c->rules == NULL)
173 goto badmem;
174
175 /* buckets */
176 for (i = 0; i < c->max_buckets; i++) {
177 int size = 0;
178 u32 alg;
179 struct crush_bucket *b;
180
181 ceph_decode_32_safe(p, end, alg, bad);
182 if (alg == 0) {
183 c->buckets[i] = NULL;
184 continue;
185 }
186 dout("crush_decode bucket %d off %x %p to %p\n",
187 i, (int)(*p-start), *p, end);
188
189 switch (alg) {
190 case CRUSH_BUCKET_UNIFORM:
191 size = sizeof(struct crush_bucket_uniform);
192 break;
193 case CRUSH_BUCKET_LIST:
194 size = sizeof(struct crush_bucket_list);
195 break;
196 case CRUSH_BUCKET_TREE:
197 size = sizeof(struct crush_bucket_tree);
198 break;
199 case CRUSH_BUCKET_STRAW:
200 size = sizeof(struct crush_bucket_straw);
201 break;
202 default:
203 err = -EINVAL;
204 goto bad;
205 }
206 BUG_ON(size == 0);
207 b = c->buckets[i] = kzalloc(size, GFP_NOFS);
208 if (b == NULL)
209 goto badmem;
210
211 ceph_decode_need(p, end, 4*sizeof(u32), bad);
212 b->id = ceph_decode_32(p);
213 b->type = ceph_decode_16(p);
214 b->alg = ceph_decode_8(p);
215 b->hash = ceph_decode_8(p);
216 b->weight = ceph_decode_32(p);
217 b->size = ceph_decode_32(p);
218
219 dout("crush_decode bucket size %d off %x %p to %p\n",
220 b->size, (int)(*p-start), *p, end);
221
222 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
223 if (b->items == NULL)
224 goto badmem;
225 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
226 if (b->perm == NULL)
227 goto badmem;
228 b->perm_n = 0;
229
230 ceph_decode_need(p, end, b->size*sizeof(u32), bad);
231 for (j = 0; j < b->size; j++)
232 b->items[j] = ceph_decode_32(p);
233
234 switch (b->alg) {
235 case CRUSH_BUCKET_UNIFORM:
236 err = crush_decode_uniform_bucket(p, end,
237 (struct crush_bucket_uniform *)b);
238 if (err < 0)
239 goto bad;
240 break;
241 case CRUSH_BUCKET_LIST:
242 err = crush_decode_list_bucket(p, end,
243 (struct crush_bucket_list *)b);
244 if (err < 0)
245 goto bad;
246 break;
247 case CRUSH_BUCKET_TREE:
248 err = crush_decode_tree_bucket(p, end,
249 (struct crush_bucket_tree *)b);
250 if (err < 0)
251 goto bad;
252 break;
253 case CRUSH_BUCKET_STRAW:
254 err = crush_decode_straw_bucket(p, end,
255 (struct crush_bucket_straw *)b);
256 if (err < 0)
257 goto bad;
258 break;
259 }
260 }
261
262 /* rules */
263 dout("rule vec is %p\n", c->rules);
264 for (i = 0; i < c->max_rules; i++) {
265 u32 yes;
266 struct crush_rule *r;
267
268 ceph_decode_32_safe(p, end, yes, bad);
269 if (!yes) {
270 dout("crush_decode NO rule %d off %x %p to %p\n",
271 i, (int)(*p-start), *p, end);
272 c->rules[i] = NULL;
273 continue;
274 }
275
276 dout("crush_decode rule %d off %x %p to %p\n",
277 i, (int)(*p-start), *p, end);
278
279 /* len */
280 ceph_decode_32_safe(p, end, yes, bad);
281#if BITS_PER_LONG == 32
282 err = -EINVAL;
283 if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
284 goto bad;
285#endif
286 r = c->rules[i] = kmalloc(sizeof(*r) +
287 yes*sizeof(struct crush_rule_step),
288 GFP_NOFS);
289 if (r == NULL)
290 goto badmem;
291 dout(" rule %d is at %p\n", i, r);
292 r->len = yes;
293 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
294 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
295 for (j = 0; j < r->len; j++) {
296 r->steps[j].op = ceph_decode_32(p);
297 r->steps[j].arg1 = ceph_decode_32(p);
298 r->steps[j].arg2 = ceph_decode_32(p);
299 }
300 }
301
302 /* ignore trailing name maps. */
303
304 dout("crush_decode success\n");
305 return c;
306
307badmem:
308 err = -ENOMEM;
309bad:
310 dout("crush_decode fail %d\n", err);
311 crush_destroy(c);
312 return ERR_PTR(err);
313}
314
315
316/*
317 * osd map
318 */
319void ceph_osdmap_destroy(struct ceph_osdmap *map)
320{
321 dout("osdmap_destroy %p\n", map);
322 if (map->crush)
323 crush_destroy(map->crush);
324 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
325 struct ceph_pg_mapping *pg =
326 rb_entry(rb_first(&map->pg_temp),
327 struct ceph_pg_mapping, node);
328 rb_erase(&pg->node, &map->pg_temp);
329 kfree(pg);
330 }
331 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
332 struct ceph_pg_pool_info *pi =
333 rb_entry(rb_first(&map->pg_pools),
334 struct ceph_pg_pool_info, node);
335 rb_erase(&pi->node, &map->pg_pools);
336 kfree(pi);
337 }
338 kfree(map->osd_state);
339 kfree(map->osd_weight);
340 kfree(map->osd_addr);
341 kfree(map);
342}
343
344/*
345 * adjust max osd value. reallocate arrays.
346 */
347static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
348{
349 u8 *state;
350 struct ceph_entity_addr *addr;
351 u32 *weight;
352
353 state = kcalloc(max, sizeof(*state), GFP_NOFS);
354 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
355 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
356 if (state == NULL || addr == NULL || weight == NULL) {
357 kfree(state);
358 kfree(addr);
359 kfree(weight);
360 return -ENOMEM;
361 }
362
363 /* copy old? */
364 if (map->osd_state) {
365 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
366 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
367 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
368 kfree(map->osd_state);
369 kfree(map->osd_addr);
370 kfree(map->osd_weight);
371 }
372
373 map->osd_state = state;
374 map->osd_weight = weight;
375 map->osd_addr = addr;
376 map->max_osd = max;
377 return 0;
378}
379
380/*
381 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
382 * to a set of osds)
383 */
384static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
385{
386 u64 a = *(u64 *)&l;
387 u64 b = *(u64 *)&r;
388
389 if (a < b)
390 return -1;
391 if (a > b)
392 return 1;
393 return 0;
394}
395
396static int __insert_pg_mapping(struct ceph_pg_mapping *new,
397 struct rb_root *root)
398{
399 struct rb_node **p = &root->rb_node;
400 struct rb_node *parent = NULL;
401 struct ceph_pg_mapping *pg = NULL;
402 int c;
403
404 while (*p) {
405 parent = *p;
406 pg = rb_entry(parent, struct ceph_pg_mapping, node);
407 c = pgid_cmp(new->pgid, pg->pgid);
408 if (c < 0)
409 p = &(*p)->rb_left;
410 else if (c > 0)
411 p = &(*p)->rb_right;
412 else
413 return -EEXIST;
414 }
415
416 rb_link_node(&new->node, parent, p);
417 rb_insert_color(&new->node, root);
418 return 0;
419}
420
421static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
422 struct ceph_pg pgid)
423{
424 struct rb_node *n = root->rb_node;
425 struct ceph_pg_mapping *pg;
426 int c;
427
428 while (n) {
429 pg = rb_entry(n, struct ceph_pg_mapping, node);
430 c = pgid_cmp(pgid, pg->pgid);
431 if (c < 0)
432 n = n->rb_left;
433 else if (c > 0)
434 n = n->rb_right;
435 else
436 return pg;
437 }
438 return NULL;
439}
440
441/*
442 * rbtree of pg pool info
443 */
444static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
445{
446 struct rb_node **p = &root->rb_node;
447 struct rb_node *parent = NULL;
448 struct ceph_pg_pool_info *pi = NULL;
449
450 while (*p) {
451 parent = *p;
452 pi = rb_entry(parent, struct ceph_pg_pool_info, node);
453 if (new->id < pi->id)
454 p = &(*p)->rb_left;
455 else if (new->id > pi->id)
456 p = &(*p)->rb_right;
457 else
458 return -EEXIST;
459 }
460
461 rb_link_node(&new->node, parent, p);
462 rb_insert_color(&new->node, root);
463 return 0;
464}
465
466static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
467{
468 struct ceph_pg_pool_info *pi;
469 struct rb_node *n = root->rb_node;
470
471 while (n) {
472 pi = rb_entry(n, struct ceph_pg_pool_info, node);
473 if (id < pi->id)
474 n = n->rb_left;
475 else if (id > pi->id)
476 n = n->rb_right;
477 else
478 return pi;
479 }
480 return NULL;
481}
482
483/*
484 * decode a full map.
485 */
486struct ceph_osdmap *osdmap_decode(void **p, void *end)
487{
488 struct ceph_osdmap *map;
489 u16 version;
490 u32 len, max, i;
491 u8 ev;
492 int err = -EINVAL;
493 void *start = *p;
494 struct ceph_pg_pool_info *pi;
495
496 dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));
497
498 map = kzalloc(sizeof(*map), GFP_NOFS);
499 if (map == NULL)
500 return ERR_PTR(-ENOMEM);
501 map->pg_temp = RB_ROOT;
502
503 ceph_decode_16_safe(p, end, version, bad);
504 if (version > CEPH_OSDMAP_VERSION) {
505 pr_warning("got unknown v %d > %d of osdmap\n", version,
506 CEPH_OSDMAP_VERSION);
507 goto bad;
508 }
509
510 ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
511 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
512 map->epoch = ceph_decode_32(p);
513 ceph_decode_copy(p, &map->created, sizeof(map->created));
514 ceph_decode_copy(p, &map->modified, sizeof(map->modified));
515
516 ceph_decode_32_safe(p, end, max, bad);
517 while (max--) {
518 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
519 pi = kmalloc(sizeof(*pi), GFP_NOFS);
520 if (!pi)
521 goto bad;
522 pi->id = ceph_decode_32(p);
523 ev = ceph_decode_8(p); /* encoding version */
524 if (ev > CEPH_PG_POOL_VERSION) {
525 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
526 ev, CEPH_PG_POOL_VERSION);
527 goto bad;
528 }
529 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
530 __insert_pg_pool(&map->pg_pools, pi);
531 calc_pg_masks(pi);
532 *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
533 *p += le32_to_cpu(pi->v.num_removed_snap_intervals)
534 * sizeof(u64) * 2;
535 }
536 ceph_decode_32_safe(p, end, map->pool_max, bad);
537
538 ceph_decode_32_safe(p, end, map->flags, bad);
539
540 max = ceph_decode_32(p);
541
542 /* (re)alloc osd arrays */
543 err = osdmap_set_max_osd(map, max);
544 if (err < 0)
545 goto bad;
546 dout("osdmap_decode max_osd = %d\n", map->max_osd);
547
548 /* osds */
549 err = -EINVAL;
550 ceph_decode_need(p, end, 3*sizeof(u32) +
551 map->max_osd*(1 + sizeof(*map->osd_weight) +
552 sizeof(*map->osd_addr)), bad);
553 *p += 4; /* skip length field (should match max) */
554 ceph_decode_copy(p, map->osd_state, map->max_osd);
555
556 *p += 4; /* skip length field (should match max) */
557 for (i = 0; i < map->max_osd; i++)
558 map->osd_weight[i] = ceph_decode_32(p);
559
560 *p += 4; /* skip length field (should match max) */
561 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
562 for (i = 0; i < map->max_osd; i++)
563 ceph_decode_addr(&map->osd_addr[i]);
564
565 /* pg_temp */
566 ceph_decode_32_safe(p, end, len, bad);
567 for (i = 0; i < len; i++) {
568 int n, j;
569 struct ceph_pg pgid;
570 struct ceph_pg_mapping *pg;
571
572 ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
573 ceph_decode_copy(p, &pgid, sizeof(pgid));
574 n = ceph_decode_32(p);
575 ceph_decode_need(p, end, n * sizeof(u32), bad);
576 err = -ENOMEM;
577 pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
578 if (!pg)
579 goto bad;
580 pg->pgid = pgid;
581 pg->len = n;
582 for (j = 0; j < n; j++)
583 pg->osds[j] = ceph_decode_32(p);
584
585 err = __insert_pg_mapping(pg, &map->pg_temp);
586 if (err)
587 goto bad;
588 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
589 }
590
591 /* crush */
592 ceph_decode_32_safe(p, end, len, bad);
593 dout("osdmap_decode crush len %d from off 0x%x\n", len,
594 (int)(*p - start));
595 ceph_decode_need(p, end, len, bad);
596 map->crush = crush_decode(*p, end);
597 *p += len;
598 if (IS_ERR(map->crush)) {
599 err = PTR_ERR(map->crush);
600 map->crush = NULL;
601 goto bad;
602 }
603
604 /* ignore the rest of the map */
605 *p = end;
606
607 dout("osdmap_decode done %p %p\n", *p, end);
608 return map;
609
610bad:
611 dout("osdmap_decode fail\n");
612 ceph_osdmap_destroy(map);
613 return ERR_PTR(err);
614}
615
616/*
617 * decode and apply an incremental map update.
618 */
619struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
620 struct ceph_osdmap *map,
621 struct ceph_messenger *msgr)
622{
623 struct crush_map *newcrush = NULL;
624 struct ceph_fsid fsid;
625 u32 epoch = 0;
626 struct ceph_timespec modified;
627 u32 len, pool;
628 __s32 new_pool_max, new_flags, max;
629 void *start = *p;
630 int err = -EINVAL;
631 u16 version;
632 struct rb_node *rbp;
633
634 ceph_decode_16_safe(p, end, version, bad);
635 if (version > CEPH_OSDMAP_INC_VERSION) {
636 pr_warning("got unknown v %d > %d of inc osdmap\n", version,
637 CEPH_OSDMAP_INC_VERSION);
638 goto bad;
639 }
640
641 ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
642 bad);
643 ceph_decode_copy(p, &fsid, sizeof(fsid));
644 epoch = ceph_decode_32(p);
645 BUG_ON(epoch != map->epoch+1);
646 ceph_decode_copy(p, &modified, sizeof(modified));
647 new_pool_max = ceph_decode_32(p);
648 new_flags = ceph_decode_32(p);
649
650 /* full map? */
651 ceph_decode_32_safe(p, end, len, bad);
652 if (len > 0) {
653 dout("apply_incremental full map len %d, %p to %p\n",
654 len, *p, end);
655 return osdmap_decode(p, min(*p+len, end));
656 }
657
658 /* new crush? */
659 ceph_decode_32_safe(p, end, len, bad);
660 if (len > 0) {
661 dout("apply_incremental new crush map len %d, %p to %p\n",
662 len, *p, end);
663 newcrush = crush_decode(*p, min(*p+len, end));
664 if (IS_ERR(newcrush))
665 return ERR_PTR(PTR_ERR(newcrush));
666 }
667
668 /* new flags? */
669 if (new_flags >= 0)
670 map->flags = new_flags;
671 if (new_pool_max >= 0)
672 map->pool_max = new_pool_max;
673
674 ceph_decode_need(p, end, 5*sizeof(u32), bad);
675
676 /* new max? */
677 max = ceph_decode_32(p);
678 if (max >= 0) {
679 err = osdmap_set_max_osd(map, max);
680 if (err < 0)
681 goto bad;
682 }
683
684 map->epoch++;
685 map->modified = map->modified;
686 if (newcrush) {
687 if (map->crush)
688 crush_destroy(map->crush);
689 map->crush = newcrush;
690 newcrush = NULL;
691 }
692
693 /* new_pool */
694 ceph_decode_32_safe(p, end, len, bad);
695 while (len--) {
696 __u8 ev;
697 struct ceph_pg_pool_info *pi;
698
699 ceph_decode_32_safe(p, end, pool, bad);
700 ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
701 ev = ceph_decode_8(p); /* encoding version */
702 if (ev > CEPH_PG_POOL_VERSION) {
703 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
704 ev, CEPH_PG_POOL_VERSION);
705 goto bad;
706 }
707 pi = __lookup_pg_pool(&map->pg_pools, pool);
708 if (!pi) {
709 pi = kmalloc(sizeof(*pi), GFP_NOFS);
710 if (!pi) {
711 err = -ENOMEM;
712 goto bad;
713 }
714 pi->id = pool;
715 __insert_pg_pool(&map->pg_pools, pi);
716 }
717 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
718 calc_pg_masks(pi);
719 }
720
721 /* old_pool */
722 ceph_decode_32_safe(p, end, len, bad);
723 while (len--) {
724 struct ceph_pg_pool_info *pi;
725
726 ceph_decode_32_safe(p, end, pool, bad);
727 pi = __lookup_pg_pool(&map->pg_pools, pool);
728 if (pi) {
729 rb_erase(&pi->node, &map->pg_pools);
730 kfree(pi);
731 }
732 }
733
734 /* new_up */
735 err = -EINVAL;
736 ceph_decode_32_safe(p, end, len, bad);
737 while (len--) {
738 u32 osd;
739 struct ceph_entity_addr addr;
740 ceph_decode_32_safe(p, end, osd, bad);
741 ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
742 ceph_decode_addr(&addr);
743 pr_info("osd%d up\n", osd);
744 BUG_ON(osd >= map->max_osd);
745 map->osd_state[osd] |= CEPH_OSD_UP;
746 map->osd_addr[osd] = addr;
747 }
748
749 /* new_down */
750 ceph_decode_32_safe(p, end, len, bad);
751 while (len--) {
752 u32 osd;
753 ceph_decode_32_safe(p, end, osd, bad);
754 (*p)++; /* clean flag */
755 pr_info("osd%d down\n", osd);
756 if (osd < map->max_osd)
757 map->osd_state[osd] &= ~CEPH_OSD_UP;
758 }
759
760 /* new_weight */
761 ceph_decode_32_safe(p, end, len, bad);
762 while (len--) {
763 u32 osd, off;
764 ceph_decode_need(p, end, sizeof(u32)*2, bad);
765 osd = ceph_decode_32(p);
766 off = ceph_decode_32(p);
767 pr_info("osd%d weight 0x%x %s\n", osd, off,
768 off == CEPH_OSD_IN ? "(in)" :
769 (off == CEPH_OSD_OUT ? "(out)" : ""));
770 if (osd < map->max_osd)
771 map->osd_weight[osd] = off;
772 }
773
774 /* new_pg_temp */
775 rbp = rb_first(&map->pg_temp);
776 ceph_decode_32_safe(p, end, len, bad);
777 while (len--) {
778 struct ceph_pg_mapping *pg;
779 int j;
780 struct ceph_pg pgid;
781 u32 pglen;
782 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
783 ceph_decode_copy(p, &pgid, sizeof(pgid));
784 pglen = ceph_decode_32(p);
785
786 /* remove any? */
787 while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
788 node)->pgid, pgid) <= 0) {
789 struct rb_node *cur = rbp;
790 rbp = rb_next(rbp);
791 dout(" removed pg_temp %llx\n",
792 *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
793 node)->pgid);
794 rb_erase(cur, &map->pg_temp);
795 }
796
797 if (pglen) {
798 /* insert */
799 ceph_decode_need(p, end, pglen*sizeof(u32), bad);
800 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
801 if (!pg) {
802 err = -ENOMEM;
803 goto bad;
804 }
805 pg->pgid = pgid;
806 pg->len = pglen;
807 for (j = 0; j < pglen; j++)
808 pg->osds[j] = ceph_decode_32(p);
809 err = __insert_pg_mapping(pg, &map->pg_temp);
810 if (err)
811 goto bad;
812 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
813 pglen);
814 }
815 }
816 while (rbp) {
817 struct rb_node *cur = rbp;
818 rbp = rb_next(rbp);
819 dout(" removed pg_temp %llx\n",
820 *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
821 node)->pgid);
822 rb_erase(cur, &map->pg_temp);
823 }
824
825 /* ignore the rest */
826 *p = end;
827 return map;
828
829bad:
830 pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
831 epoch, (int)(*p - start), *p, start, end);
832 print_hex_dump(KERN_DEBUG, "osdmap: ",
833 DUMP_PREFIX_OFFSET, 16, 1,
834 start, end - start, true);
835 if (newcrush)
836 crush_destroy(newcrush);
837 return ERR_PTR(err);
838}
839
840
841
842
843/*
844 * calculate file layout from given offset, length.
845 * fill in correct oid, logical length, and object extent
846 * offset, length.
847 *
848 * for now, we write only a single su, until we can
849 * pass a stride back to the caller.
850 */
851void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
852 u64 off, u64 *plen,
853 u64 *ono,
854 u64 *oxoff, u64 *oxlen)
855{
856 u32 osize = le32_to_cpu(layout->fl_object_size);
857 u32 su = le32_to_cpu(layout->fl_stripe_unit);
858 u32 sc = le32_to_cpu(layout->fl_stripe_count);
859 u32 bl, stripeno, stripepos, objsetno;
860 u32 su_per_object;
861 u64 t, su_offset;
862
863 dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
864 osize, su);
865 su_per_object = osize / su;
866 dout("osize %u / su %u = su_per_object %u\n", osize, su,
867 su_per_object);
868
869 BUG_ON((su & ~PAGE_MASK) != 0);
870 /* bl = *off / su; */
871 t = off;
872 do_div(t, su);
873 bl = t;
874 dout("off %llu / su %u = bl %u\n", off, su, bl);
875
876 stripeno = bl / sc;
877 stripepos = bl % sc;
878 objsetno = stripeno / su_per_object;
879
880 *ono = objsetno * sc + stripepos;
881 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
882
883 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
884 t = off;
885 su_offset = do_div(t, su);
886 *oxoff = su_offset + (stripeno % su_per_object) * su;
887
888 /*
889 * Calculate the length of the extent being written to the selected
890 * object. This is the minimum of the full length requested (plen) or
891 * the remainder of the current stripe being written to.
892 */
893 *oxlen = min_t(u64, *plen, su - su_offset);
894 *plen = *oxlen;
895
896 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
897}
898
899/*
900 * calculate an object layout (i.e. pgid) from an oid,
901 * file_layout, and osdmap
902 */
903int ceph_calc_object_layout(struct ceph_object_layout *ol,
904 const char *oid,
905 struct ceph_file_layout *fl,
906 struct ceph_osdmap *osdmap)
907{
908 unsigned num, num_mask;
909 struct ceph_pg pgid;
910 s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
911 int poolid = le32_to_cpu(fl->fl_pg_pool);
912 struct ceph_pg_pool_info *pool;
913 unsigned ps;
914
915 BUG_ON(!osdmap);
916
917 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
918 if (!pool)
919 return -EIO;
920 ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
921 if (preferred >= 0) {
922 ps += preferred;
923 num = le32_to_cpu(pool->v.lpg_num);
924 num_mask = pool->lpg_num_mask;
925 } else {
926 num = le32_to_cpu(pool->v.pg_num);
927 num_mask = pool->pg_num_mask;
928 }
929
930 pgid.ps = cpu_to_le16(ps);
931 pgid.preferred = cpu_to_le16(preferred);
932 pgid.pool = fl->fl_pg_pool;
933 if (preferred >= 0)
934 dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
935 (int)preferred);
936 else
937 dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
938
939 ol->ol_pgid = pgid;
940 ol->ol_stripe_unit = fl->fl_object_stripe_unit;
941 return 0;
942}
943
944/*
945 * Calculate raw osd vector for the given pgid. Return pointer to osd
946 * array, or NULL on failure.
947 */
948static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
949 int *osds, int *num)
950{
951 struct ceph_pg_mapping *pg;
952 struct ceph_pg_pool_info *pool;
953 int ruleno;
954 unsigned poolid, ps, pps;
955 int preferred;
956
957 /* pg_temp? */
958 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
959 if (pg) {
960 *num = pg->len;
961 return pg->osds;
962 }
963
964 /* crush */
965 poolid = le32_to_cpu(pgid.pool);
966 ps = le16_to_cpu(pgid.ps);
967 preferred = (s16)le16_to_cpu(pgid.preferred);
968
969 /* don't forcefeed bad device ids to crush */
970 if (preferred >= osdmap->max_osd ||
971 preferred >= osdmap->crush->max_devices)
972 preferred = -1;
973
974 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
975 if (!pool)
976 return NULL;
977 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
978 pool->v.type, pool->v.size);
979 if (ruleno < 0) {
980 pr_err("no crush rule pool %d type %d size %d\n",
981 poolid, pool->v.type, pool->v.size);
982 return NULL;
983 }
984
985 if (preferred >= 0)
986 pps = ceph_stable_mod(ps,
987 le32_to_cpu(pool->v.lpgp_num),
988 pool->lpgp_num_mask);
989 else
990 pps = ceph_stable_mod(ps,
991 le32_to_cpu(pool->v.pgp_num),
992 pool->pgp_num_mask);
993 pps += poolid;
994 *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
995 min_t(int, pool->v.size, *num),
996 preferred, osdmap->osd_weight);
997 return osds;
998}
999
1000/*
1001 * Return primary osd for given pgid, or -1 if none.
1002 */
1003int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
1004{
1005 int rawosds[10], *osds;
1006 int i, num = ARRAY_SIZE(rawosds);
1007
1008 osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
1009 if (!osds)
1010 return -1;
1011
1012 /* primary is first up osd */
1013 for (i = 0; i < num; i++)
1014 if (ceph_osd_is_up(osdmap, osds[i])) {
1015 return osds[i];
1016 break;
1017 }
1018 return -1;
1019}
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
new file mode 100644
index 000000000000..1fb55afb2642
--- /dev/null
+++ b/fs/ceph/osdmap.h
@@ -0,0 +1,125 @@
1#ifndef _FS_CEPH_OSDMAP_H
2#define _FS_CEPH_OSDMAP_H
3
4#include <linux/rbtree.h>
5#include "types.h"
6#include "ceph_fs.h"
7#include "crush/crush.h"
8
9/*
10 * The osd map describes the current membership of the osd cluster and
11 * specifies the mapping of objects to placement groups and placement
12 * groups to (sets of) osds. That is, it completely specifies the
13 * (desired) distribution of all data objects in the system at some
14 * point in time.
15 *
16 * Each map version is identified by an epoch, which increases monotonically.
17 *
18 * The map can be updated either via an incremental map (diff) describing
19 * the change between two successive epochs, or as a fully encoded map.
20 */
/*
 * in-memory state for one pg pool: the decoded wire-format fields in
 * 'v' plus pre-computed helper masks.
 */
struct ceph_pg_pool_info {
	struct rb_node node;	/* osdmap->pg_pools tree */
	int id;
	struct ceph_pg_pool v;
	/* stable_mod masks matching v.pg_num/pgp_num/lpg_num/lpgp_num */
	int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
};
27
/*
 * an explicit (pg_temp) remapping of a pg to a fixed set of osds
 */
struct ceph_pg_mapping {
	struct rb_node node;	/* osdmap->pg_temp tree */
	struct ceph_pg pgid;
	int len;		/* number of entries in osds[] */
	int osds[];		/* flexible array of osd ids */
};
34
struct ceph_osdmap {
	struct ceph_fsid fsid;
	u32 epoch;		/* map version; increases monotonically */
	u32 mkfs_epoch;
	struct ceph_timespec created, modified;

	u32 flags;         /* CEPH_OSDMAP_* */

	u32 max_osd;       /* size of osd_state, _offload, _addr arrays */
	u8 *osd_state;     /* CEPH_OSD_* */
	u32 *osd_weight;   /* 0 = failed, 0x10000 = 100% normal */
	struct ceph_entity_addr *osd_addr;

	struct rb_root pg_temp;		/* explicit pg -> osds remappings */
	struct rb_root pg_pools;	/* ceph_pg_pool_info, keyed by pool id */
	u32 pool_max;

	/* the CRUSH map specifies the mapping of placement groups to
	 * the list of osds that store+replicate them. */
	struct crush_map *crush;
};
56
/*
 * file layout helpers
 *
 * accessors that decode the little-endian on-wire ceph_file_layout
 * fields into host-endian signed values
 */
#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
#define ceph_file_layout_stripe_count(l) \
	((__s32)le32_to_cpu((l).fl_stripe_count))
#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
#define ceph_file_layout_object_su(l) \
	((__s32)le32_to_cpu((l).fl_object_stripe_unit))
#define ceph_file_layout_pg_preferred(l) \
	((__s32)le32_to_cpu((l).fl_pg_preferred))
#define ceph_file_layout_pg_pool(l) \
	((__s32)le32_to_cpu((l).fl_pg_pool))
71
72static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
73{
74 return le32_to_cpu(l->fl_stripe_unit) *
75 le32_to_cpu(l->fl_stripe_count);
76}
77
78/* "period" == bytes before i start on a new set of objects */
79static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
80{
81 return le32_to_cpu(l->fl_object_size) *
82 le32_to_cpu(l->fl_stripe_count);
83}
84
85
86static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
87{
88 return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
89}
90
91static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
92{
93 return map && (map->flags & flag);
94}
95
96extern char *ceph_osdmap_state_str(char *str, int len, int state);
97
98static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
99 int osd)
100{
101 if (osd >= map->max_osd)
102 return NULL;
103 return &map->osd_addr[osd];
104}
105
106extern struct ceph_osdmap *osdmap_decode(void **p, void *end);
107extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
108 struct ceph_osdmap *map,
109 struct ceph_messenger *msgr);
110extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
111
112/* calculate mapping of a file extent to an object */
113extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
114 u64 off, u64 *plen,
115 u64 *bno, u64 *oxoff, u64 *oxlen);
116
117/* calculate mapping of object to a placement group */
118extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
119 const char *oid,
120 struct ceph_file_layout *fl,
121 struct ceph_osdmap *osdmap);
122extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
123 struct ceph_pg pgid);
124
125#endif
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
new file mode 100644
index 000000000000..370e93695474
--- /dev/null
+++ b/fs/ceph/pagelist.c
@@ -0,0 +1,54 @@
1
2#include <linux/pagemap.h>
3#include <linux/highmem.h>
4
5#include "pagelist.h"
6
/*
 * Unmap the tail page and free every page owned by the pagelist.
 * Always returns 0.
 */
int ceph_pagelist_release(struct ceph_pagelist *pl)
{
	/* NOTE(review): kunmap() takes the struct page that was kmap'd,
	 * but mapped_tail holds the mapped address — confirm against
	 * this kernel's kmap/kunmap API. */
	if (pl->mapped_tail)
		kunmap(pl->mapped_tail);
	while (!list_empty(&pl->head)) {
		struct page *page = list_first_entry(&pl->head, struct page,
						     lru);
		list_del(&page->lru);
		__free_page(page);
	}
	return 0;
}
19
/*
 * Grow the pagelist by one page: allocate it, append it to the page
 * list, and kmap it as the new write target.  Adds PAGE_SIZE to
 * pl->room.  Returns 0 or -ENOMEM.
 */
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
	struct page *page = alloc_page(GFP_NOFS);
	if (!page)
		return -ENOMEM;
	pl->room += PAGE_SIZE;
	list_add_tail(&page->lru, &pl->head);
	/* only the most recently added page stays mapped */
	if (pl->mapped_tail)
		kunmap(pl->mapped_tail);
	pl->mapped_tail = kmap(page);
	return 0;
}
32
/*
 * Append len bytes from buf to the pagelist.  Fills the remainder of
 * the current tail page, adding fresh pages until the residue fits,
 * then copies the final fragment.  Returns 0 or -ENOMEM (in which
 * case a prefix of the data may already have been appended).
 */
int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len)
{
	while (pl->room < len) {
		size_t bit = pl->room;	/* bytes free in current tail page */
		int ret;

		/* (pl->length & ~PAGE_CACHE_MASK) is the write offset
		 * within the mapped tail page */
		memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
		       buf, bit);
		pl->length += bit;
		pl->room -= bit;
		buf += bit;
		len -= bit;
		ret = ceph_pagelist_addpage(pl);
		if (ret)
			return ret;
	}

	memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
	pl->length += len;
	pl->room -= len;
	return 0;
}
diff --git a/fs/ceph/pagelist.h b/fs/ceph/pagelist.h
new file mode 100644
index 000000000000..e8a4187e1087
--- /dev/null
+++ b/fs/ceph/pagelist.h
@@ -0,0 +1,54 @@
1#ifndef __FS_CEPH_PAGELIST_H
2#define __FS_CEPH_PAGELIST_H
3
4#include <linux/list.h>
5
/*
 * a growable buffer backed by a list of kernel pages; only the tail
 * page is kept kmap'd for appending
 */
struct ceph_pagelist {
	struct list_head head;		/* pages making up the buffer */
	void *mapped_tail;		/* mapped address of the last page */
	size_t length;			/* total bytes appended so far */
	size_t room;			/* bytes still free in the tail page */
};

/* start empty; first append allocates the first page */
static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
{
	INIT_LIST_HEAD(&pl->head);
	pl->mapped_tail = NULL;
	pl->length = 0;
	pl->room = 0;
}
20extern int ceph_pagelist_release(struct ceph_pagelist *pl);
21
22extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l);
23
/*
 * little-endian wire-encoding helpers; each appends to the pagelist
 * and returns 0 or -ENOMEM
 */
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
	__le64 ev = cpu_to_le64(v);
	return ceph_pagelist_append(pl, &ev, sizeof(ev));
}
static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v)
{
	__le32 ev = cpu_to_le32(v);
	return ceph_pagelist_append(pl, &ev, sizeof(ev));
}
static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v)
{
	__le16 ev = cpu_to_le16(v);
	return ceph_pagelist_append(pl, &ev, sizeof(ev));
}
static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
{
	return ceph_pagelist_append(pl, &v, 1);
}
/* encode a length-prefixed string: __le32 len then len bytes */
static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
					      char *s, size_t len)
{
	int ret = ceph_pagelist_encode_32(pl, len);
	if (ret)
		return ret;
	if (len)
		return ceph_pagelist_append(pl, s, len);
	return 0;
}
53
54#endif
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
new file mode 100644
index 000000000000..26ac8b89a676
--- /dev/null
+++ b/fs/ceph/rados.h
@@ -0,0 +1,374 @@
1#ifndef __RADOS_H
2#define __RADOS_H
3
4/*
5 * Data types for the Ceph distributed object storage layer RADOS
6 * (Reliable Autonomic Distributed Object Store).
7 */
8
9#include "msgr.h"
10
11/*
12 * osdmap encoding versions
13 */
14#define CEPH_OSDMAP_INC_VERSION 4
15#define CEPH_OSDMAP_VERSION 4
16
/*
 * fs id: 16-byte identifier for a ceph cluster instance
 */
struct ceph_fsid {
	unsigned char fsid[16];
};

/* three-way compare with memcmp() semantics */
static inline int ceph_fsid_compare(const struct ceph_fsid *a,
				    const struct ceph_fsid *b)
{
	return memcmp(a->fsid, b->fsid, sizeof(a->fsid));
}
29
30/*
31 * ino, object, etc.
32 */
33typedef __le64 ceph_snapid_t;
34#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */
35#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */
36#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */
37
38struct ceph_timespec {
39 __le32 tv_sec;
40 __le32 tv_nsec;
41} __attribute__ ((packed));
42
43
44/*
45 * object layout - how objects are mapped into PGs
46 */
47#define CEPH_OBJECT_LAYOUT_HASH 1
48#define CEPH_OBJECT_LAYOUT_LINEAR 2
49#define CEPH_OBJECT_LAYOUT_HASHINO 3
50
51/*
52 * pg layout -- how PGs are mapped onto (sets of) OSDs
53 */
54#define CEPH_PG_LAYOUT_CRUSH 0
55#define CEPH_PG_LAYOUT_HASH 1
56#define CEPH_PG_LAYOUT_LINEAR 2
57#define CEPH_PG_LAYOUT_HYBRID 3
58
59
/*
 * placement group.
 * we encode this into one __le64.
 */
struct ceph_pg {
	__le16 preferred; /* preferred primary osd (-1 = none) */
	__le16 ps;        /* placement seed */
	__le32 pool;      /* object pool */
} __attribute__ ((packed));
69
/*
 * pg_pool is a set of pgs storing a pool of objects
 *
 * pg_num -- base number of pseudorandomly placed pgs
 *
 * pgp_num -- effective number when calculating pg placement.  this
 * is used for pg_num increases.  new pgs result in data being "split"
 * into new pgs.  for this to proceed smoothly, new pgs are initially
 * colocated with their parents; that is, pgp_num doesn't increase
 * until the new pgs have successfully split.  only _then_ are the new
 * pgs placed independently.
 *
 * lpg_num -- localized pg count (per device).  replicas are randomly
 * selected.
 *
 * lpgp_num -- as above.
 */
#define CEPH_PG_TYPE_REP 1
#define CEPH_PG_TYPE_RAID4 2
#define CEPH_PG_POOL_VERSION 2
struct ceph_pg_pool {
	__u8 type; /* CEPH_PG_TYPE_* */
	__u8 size; /* number of osds in each pg */
	__u8 crush_ruleset; /* crush placement rule */
	__u8 object_hash; /* hash mapping object name to ps */
	__le32 pg_num, pgp_num; /* number of pg's */
	__le32 lpg_num, lpgp_num; /* number of localized pg's */
	__le32 last_change; /* most recent epoch changed */
	__le64 snap_seq; /* seq for per-pool snapshot */
	__le32 snap_epoch; /* epoch of last snap */
	__le32 num_snaps;
	__le32 num_removed_snap_intervals;
	__le64 uid;
} __attribute__ ((packed));
104
/*
 * stable_mod maps x into one of b bins.  like straight-up modulo, but
 * the assignment of existing values stays stable as the bin count b
 * grows over time.  bmask is the containing power of 2 minus 1:
 *
 *   b <= bmask and bmask = (2**n)-1
 *   e.g., b=12 -> bmask=15, b=123 -> bmask=127
 */
static inline int ceph_stable_mod(int x, int b, int bmask)
{
	int bin = x & bmask;

	/* values that land past b fall back to the next-smaller mask */
	return (bin < b) ? bin : (x & (bmask >> 1));
}
121
/*
 * object layout - how a given object should be stored.
 */
struct ceph_object_layout {
	struct ceph_pg ol_pgid;   /* raw pg, with _full_ ps precision. */
	__le32 ol_stripe_unit;    /* for per-object parity, if any */
} __attribute__ ((packed));

/*
 * compound epoch+version, used by storage layer to serialize mutations
 */
struct ceph_eversion {
	__le32 epoch;
	__le64 version;
} __attribute__ ((packed));
137
138/*
139 * osd map bits
140 */
141
142/* status bits */
143#define CEPH_OSD_EXISTS 1
144#define CEPH_OSD_UP 2
145
146/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
147#define CEPH_OSD_IN 0x10000
148#define CEPH_OSD_OUT 0
149
150
151/*
152 * osd map flag bits
153 */
154#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
155#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
156#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
157#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
158#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
159
160/*
161 * osd ops
162 */
163#define CEPH_OSD_OP_MODE 0xf000
164#define CEPH_OSD_OP_MODE_RD 0x1000
165#define CEPH_OSD_OP_MODE_WR 0x2000
166#define CEPH_OSD_OP_MODE_RMW 0x3000
167#define CEPH_OSD_OP_MODE_SUB 0x4000
168
169#define CEPH_OSD_OP_TYPE 0x0f00
170#define CEPH_OSD_OP_TYPE_LOCK 0x0100
171#define CEPH_OSD_OP_TYPE_DATA 0x0200
172#define CEPH_OSD_OP_TYPE_ATTR 0x0300
173#define CEPH_OSD_OP_TYPE_EXEC 0x0400
174#define CEPH_OSD_OP_TYPE_PG 0x0500
175
176enum {
177 /** data **/
178 /* read */
179 CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
180 CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
181
182 /* fancy read */
183 CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
184
185 /* write */
186 CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
187 CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
188 CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
189 CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
190 CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
191
192 /* fancy write */
193 CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
194 CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
195 CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
196 CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
197
198 CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
199 CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
200 CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
201
202 CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
203
204 /** attrs **/
205 /* read */
206 CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
207 CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
208
209 /* write */
210 CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
211 CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
212 CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
213 CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
214
215 /** subop **/
216 CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
217 CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
218 CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
219 CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
220 CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
221
222 /** lock **/
223 CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
224 CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
225 CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
226 CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
227 CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
228 CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
229
230 /** exec **/
231 CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
232
233 /** pg **/
234 CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
235};
236
/*
 * predicates classifying an osd op code by its embedded
 * CEPH_OSD_OP_TYPE_* and CEPH_OSD_OP_MODE_* bits
 */
static inline int ceph_osd_op_type_lock(int op)
{
	return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK;
}
static inline int ceph_osd_op_type_data(int op)
{
	return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA;
}
static inline int ceph_osd_op_type_attr(int op)
{
	return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR;
}
static inline int ceph_osd_op_type_exec(int op)
{
	return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC;
}
static inline int ceph_osd_op_type_pg(int op)
{
	return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
}

static inline int ceph_osd_op_mode_subop(int op)
{
	return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB;
}
static inline int ceph_osd_op_mode_read(int op)
{
	return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
}
static inline int ceph_osd_op_mode_modify(int op)
{
	return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
}
270
271#define CEPH_OSD_TMAP_HDR 'h'
272#define CEPH_OSD_TMAP_SET 's'
273#define CEPH_OSD_TMAP_RM 'r'
274
275extern const char *ceph_osd_op_name(int op);
276
277
278/*
279 * osd op flags
280 *
281 * An op may be READ, WRITE, or READ|WRITE.
282 */
283enum {
284 CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */
285 CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */
286 CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */
287 CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */
288 CEPH_OSD_FLAG_READ = 16, /* op may read */
289 CEPH_OSD_FLAG_WRITE = 32, /* op may write */
290 CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */
291 CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */
292 CEPH_OSD_FLAG_BALANCE_READS = 256,
293 CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
294 CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
295 CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
296};
297
298enum {
299 CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
300};
301
302#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
303#define EBLACKLISTED ESHUTDOWN /* blacklisted */
304
305/*
306 * an individual object operation. each may be accompanied by some data
307 * payload
308 */
309struct ceph_osd_op {
310 __le16 op; /* CEPH_OSD_OP_* */
311 __le32 flags; /* CEPH_OSD_FLAG_* */
312 union {
313 struct {
314 __le64 offset, length;
315 __le64 truncate_size;
316 __le32 truncate_seq;
317 } __attribute__ ((packed)) extent;
318 struct {
319 __le32 name_len;
320 __le32 value_len;
321 } __attribute__ ((packed)) xattr;
322 struct {
323 __u8 class_len;
324 __u8 method_len;
325 __u8 argc;
326 __le32 indata_len;
327 } __attribute__ ((packed)) cls;
328 struct {
329 __le64 cookie, count;
330 } __attribute__ ((packed)) pgls;
331 };
332 __le32 payload_len;
333} __attribute__ ((packed));
334
335/*
336 * osd request message header. each request may include multiple
337 * ceph_osd_op object operations.
338 */
339struct ceph_osd_request_head {
340 __le32 client_inc; /* client incarnation */
341 struct ceph_object_layout layout; /* pgid */
342 __le32 osdmap_epoch; /* client's osdmap epoch */
343
344 __le32 flags;
345
346 struct ceph_timespec mtime; /* for mutations only */
347 struct ceph_eversion reassert_version; /* if we are replaying op */
348
349 __le32 object_len; /* length of object name */
350
351 __le64 snapid; /* snapid to read */
352 __le64 snap_seq; /* writer's snap context */
353 __le32 num_snaps;
354
355 __le16 num_ops;
356 struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */
357} __attribute__ ((packed));
358
359struct ceph_osd_reply_head {
360 __le32 client_inc; /* client incarnation */
361 __le32 flags;
362 struct ceph_object_layout layout;
363 __le32 osdmap_epoch;
364 struct ceph_eversion reassert_version; /* for replaying uncommitted */
365
366 __le32 result; /* result code */
367
368 __le32 object_len; /* length of object name */
369 __le32 num_ops;
370 struct ceph_osd_op ops[0]; /* ops[], object */
371} __attribute__ ((packed));
372
373
374#endif
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
new file mode 100644
index 000000000000..bf2a5f3846a4
--- /dev/null
+++ b/fs/ceph/snap.c
@@ -0,0 +1,904 @@
1#include "ceph_debug.h"
2
3#include <linux/sort.h>
4
5#include "super.h"
6#include "decode.h"
7
8/*
9 * Snapshots in ceph are driven in large part by cooperation from the
10 * client. In contrast to local file systems or file servers that
11 * implement snapshots at a single point in the system, ceph's
12 * distributed access to storage requires clients to help decide
13 * whether a write logically occurs before or after a recently created
14 * snapshot.
15 *
 16 * This provides a perfect instantaneous client-wide snapshot. Between
17 * clients, however, snapshots may appear to be applied at slightly
18 * different points in time, depending on delays in delivering the
19 * snapshot notification.
20 *
21 * Snapshots are _not_ file system-wide. Instead, each snapshot
22 * applies to the subdirectory nested beneath some directory. This
23 * effectively divides the hierarchy into multiple "realms," where all
24 * of the files contained by each realm share the same set of
25 * snapshots. An individual realm's snap set contains snapshots
26 * explicitly created on that realm, as well as any snaps in its
 27 * parent's snap set _after_ the point at which the parent became its
28 * parent (due to, say, a rename). Similarly, snaps from prior parents
29 * during the time intervals during which they were the parent are included.
30 *
 31 * The client is spared most of this detail, fortunately... it must only
 32 * maintain a hierarchy of realms reflecting the current parent/child
33 * realm relationship, and for each realm has an explicit list of snaps
34 * inherited from prior parents.
35 *
36 * A snap_realm struct is maintained for realms containing every inode
37 * with an open cap in the system. (The needed snap realm information is
38 * provided by the MDS whenever a cap is issued, i.e., on open.) A 'seq'
39 * version number is used to ensure that as realm parameters change (new
40 * snapshot, new parent, etc.) the client's realm hierarchy is updated.
41 *
42 * The realm hierarchy drives the generation of a 'snap context' for each
43 * realm, which simply lists the resulting set of snaps for the realm. This
44 * is attached to any writes sent to OSDs.
45 */
46/*
47 * Unfortunately error handling is a bit mixed here. If we get a snap
48 * update, but don't have enough memory to update our realm hierarchy,
49 * it's not clear what we can do about it (besides complaining to the
50 * console).
51 */
52
53
/*
 * increase ref count for the realm
 *
 * caller must hold snap_rwsem for write.
 */
void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("get_realm %p %d -> %d\n", realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
	/*
	 * since we _only_ increment realm refs or empty the empty
	 * list with snap_rwsem held, adjusting the empty list here is
	 * safe.  we do need to protect against concurrent empty list
	 * additions, however.
	 */
	if (atomic_read(&realm->nref) == 0) {
		/* 0 -> 1: realm was parked on the empty list; unpark it */
		spin_lock(&mdsc->snap_empty_lock);
		list_del_init(&realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}

	atomic_inc(&realm->nref);
}
78
79static void __insert_snap_realm(struct rb_root *root,
80 struct ceph_snap_realm *new)
81{
82 struct rb_node **p = &root->rb_node;
83 struct rb_node *parent = NULL;
84 struct ceph_snap_realm *r = NULL;
85
86 while (*p) {
87 parent = *p;
88 r = rb_entry(parent, struct ceph_snap_realm, node);
89 if (new->ino < r->ino)
90 p = &(*p)->rb_left;
91 else if (new->ino > r->ino)
92 p = &(*p)->rb_right;
93 else
94 BUG();
95 }
96
97 rb_link_node(&new->node, parent, p);
98 rb_insert_color(&new->node, root);
99}
100
/*
 * create and get the realm rooted at @ino and bump its ref count.
 *
 * caller must hold snap_rwsem for write.
 */
static struct ceph_snap_realm *ceph_create_snap_realm(
	struct ceph_mds_client *mdsc,
	u64 ino)
{
	struct ceph_snap_realm *realm;

	realm = kzalloc(sizeof(*realm), GFP_NOFS);
	if (!realm)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): nref starts at 0 here despite the "bump its ref
	 * count" comment above — presumably the caller takes the ref;
	 * confirm at call sites. */
	atomic_set(&realm->nref, 0);    /* tree does not take a ref */
	realm->ino = ino;
	INIT_LIST_HEAD(&realm->children);
	INIT_LIST_HEAD(&realm->child_item);
	INIT_LIST_HEAD(&realm->empty_item);
	INIT_LIST_HEAD(&realm->inodes_with_caps);
	spin_lock_init(&realm->inodes_with_caps_lock);
	__insert_snap_realm(&mdsc->snap_realms, realm);
	dout("create_snap_realm %llx %p\n", realm->ino, realm);
	return realm;
}
127
/*
 * lookup the realm rooted at @ino.  returns the realm or NULL; does
 * NOT take a reference.
 *
 * caller must hold snap_rwsem for write.
 */
struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
					       u64 ino)
{
	struct rb_node *n = mdsc->snap_realms.rb_node;
	struct ceph_snap_realm *r;

	/* standard rbtree search keyed by ino */
	while (n) {
		r = rb_entry(n, struct ceph_snap_realm, node);
		if (ino < r->ino)
			n = n->rb_left;
		else if (ino > r->ino)
			n = n->rb_right;
		else {
			dout("lookup_snap_realm %llx %p\n", r->ino, r);
			return r;
		}
	}
	return NULL;
}
152
153static void __put_snap_realm(struct ceph_mds_client *mdsc,
154 struct ceph_snap_realm *realm);
155
/*
 * unlink the realm from the tree and its parent, then free it.
 *
 * called with snap_rwsem (write)
 */
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
				 struct ceph_snap_realm *realm)
{
	dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);

	rb_erase(&realm->node, &mdsc->snap_realms);

	/* drop the ref we held on our parent */
	if (realm->parent) {
		list_del_init(&realm->child_item);
		__put_snap_realm(mdsc, realm->parent);
	}

	kfree(realm->prior_parent_snaps);
	kfree(realm->snaps);
	ceph_put_snap_context(realm->cached_context);
	kfree(realm);
}
176
/*
 * drop a realm reference; destroys the realm when the count hits zero.
 *
 * caller holds snap_rwsem (write)
 */
static void __put_snap_realm(struct ceph_mds_client *mdsc,
			     struct ceph_snap_realm *realm)
{
	dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (atomic_dec_and_test(&realm->nref))
		__destroy_snap_realm(mdsc, realm);
}
188
/*
 * drop a realm reference.  if it hits zero, destroy the realm
 * immediately when we can get snap_rwsem, otherwise park it on the
 * empty list for later cleanup.
 *
 * caller needn't hold any locks
 */
void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
			 struct ceph_snap_realm *realm)
{
	dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
	     atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
	if (!atomic_dec_and_test(&realm->nref))
		return;

	if (down_write_trylock(&mdsc->snap_rwsem)) {
		__destroy_snap_realm(mdsc, realm);
		up_write(&mdsc->snap_rwsem);
	} else {
		/* defer to __cleanup_empty_realms() */
		spin_lock(&mdsc->snap_empty_lock);
		list_add(&mdsc->snap_empty, &realm->empty_item);
		spin_unlock(&mdsc->snap_empty_lock);
	}
}
209
/*
 * Clean up any realms whose ref counts have dropped to zero.  Note
 * that this does not include realms who were created but not yet
 * used.
 *
 * Called under snap_rwsem (write)
 */
static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	struct ceph_snap_realm *realm;

	spin_lock(&mdsc->snap_empty_lock);
	while (!list_empty(&mdsc->snap_empty)) {
		realm = list_first_entry(&mdsc->snap_empty,
				   struct ceph_snap_realm, empty_item);
		list_del(&realm->empty_item);
		/* drop the spinlock around destroy; snap_rwsem keeps the
		 * tree stable, and new empty-list entries are re-checked
		 * on the next iteration */
		spin_unlock(&mdsc->snap_empty_lock);
		__destroy_snap_realm(mdsc, realm);
		spin_lock(&mdsc->snap_empty_lock);
	}
	spin_unlock(&mdsc->snap_empty_lock);
}
232
/* public wrapper: take snap_rwsem and reap zero-ref realms */
void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
{
	down_write(&mdsc->snap_rwsem);
	__cleanup_empty_realms(mdsc);
	up_write(&mdsc->snap_rwsem);
}
239
/*
 * adjust the parent realm of a given @realm.  adjust child list, and parent
 * pointers, and ref counts appropriately.
 *
 * return 1 if parent was changed, 0 if unchanged, <0 on error.
 *
 * caller must hold snap_rwsem for write.
 */
static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
				    struct ceph_snap_realm *realm,
				    u64 parentino)
{
	struct ceph_snap_realm *parent;

	if (realm->parent_ino == parentino)
		return 0;

	/* find or create the new parent realm */
	parent = ceph_lookup_snap_realm(mdsc, parentino);
	if (!parent) {
		parent = ceph_create_snap_realm(mdsc, parentino);
		if (IS_ERR(parent))
			return PTR_ERR(parent);
	}
	dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
	     realm->ino, realm, realm->parent_ino, realm->parent,
	     parentino, parent);
	/* detach from (and drop our ref on) the old parent */
	if (realm->parent) {
		list_del_init(&realm->child_item);
		ceph_put_snap_realm(mdsc, realm->parent);
	}
	realm->parent_ino = parentino;
	realm->parent = parent;
	ceph_get_snap_realm(mdsc, parent);
	list_add(&realm->child_item, &parent->children);
	return 1;
}
276
277
278static int cmpu64_rev(const void *a, const void *b)
279{
280 if (*(u64 *)a < *(u64 *)b)
281 return 1;
282 if (*(u64 *)a > *(u64 *)b)
283 return -1;
284 return 0;
285}
286
/*
 * build the snap context for a given realm: the union of the realm's
 * own snaps, its prior-parent snaps, and the parent's snaps that
 * postdate parent_since, reverse-sorted.  recurses up the hierarchy
 * to (re)build stale parent contexts first.  returns 0 or -ENOMEM.
 */
static int build_snap_context(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *parent = realm->parent;
	struct ceph_snap_context *snapc;
	int err = 0;
	int i;
	int num = realm->num_prior_parent_snaps + realm->num_snaps;

	/*
	 * build parent context, if it hasn't been built.
	 * conservatively estimate that all parent snaps might be
	 * included by us.
	 */
	if (parent) {
		if (!parent->cached_context) {
			err = build_snap_context(parent);
			if (err)
				goto fail;
		}
		num += parent->cached_context->num_snaps;
	}

	/* do i actually need to update?  not if my context seq
	   matches realm seq, and my parents' does too.  (this works
	   because we rebuild_snap_realms() works _downward_ in
	   hierarchy after each update.) */
	if (realm->cached_context &&
	    realm->cached_context->seq <= realm->seq &&
	    (!parent ||
	     realm->cached_context->seq <= parent->cached_context->seq)) {
		dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
		     " (unchanged)\n",
		     realm->ino, realm, realm->cached_context,
		     realm->cached_context->seq,
		     realm->cached_context->num_snaps);
		return 0;
	}

	/* alloc new snap context; guard the size arithmetic first */
	err = -ENOMEM;
	if (num > ULONG_MAX / sizeof(u64) - sizeof(*snapc))
		goto fail;
	snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
	if (!snapc)
		goto fail;
	atomic_set(&snapc->nref, 1);

	/* build (reverse sorted) snap vector */
	num = 0;
	snapc->seq = realm->seq;
	if (parent) {
		/* include any of parent's snaps occurring _after_ my
		   parent became my parent */
		for (i = 0; i < parent->cached_context->num_snaps; i++)
			if (parent->cached_context->snaps[i] >=
			    realm->parent_since)
				snapc->snaps[num++] =
					parent->cached_context->snaps[i];
		if (parent->cached_context->seq > snapc->seq)
			snapc->seq = parent->cached_context->seq;
	}
	memcpy(snapc->snaps + num, realm->snaps,
	       sizeof(u64)*realm->num_snaps);
	num += realm->num_snaps;
	memcpy(snapc->snaps + num, realm->prior_parent_snaps,
	       sizeof(u64)*realm->num_prior_parent_snaps);
	num += realm->num_prior_parent_snaps;

	sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
	snapc->num_snaps = num;
	dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
	     realm->ino, realm, snapc, snapc->seq, snapc->num_snaps);

	/* swap in the new context, dropping the old one */
	if (realm->cached_context)
		ceph_put_snap_context(realm->cached_context);
	realm->cached_context = snapc;
	return 0;

fail:
	/*
	 * if we fail, clear old (incorrect) cached_context... hopefully
	 * we'll have better luck building it later
	 */
	if (realm->cached_context) {
		ceph_put_snap_context(realm->cached_context);
		realm->cached_context = NULL;
	}
	pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
	       realm, err);
	return err;
}
381
/*
 * rebuild snap context for the given realm and all of its children.
 */
static void rebuild_snap_realms(struct ceph_snap_realm *realm)
{
	struct ceph_snap_realm *child;

	dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
	/* NOTE(review): build_snap_context() errors are ignored here;
	 * its fail path logs and clears the stale cached_context */
	build_snap_context(realm);

	/* recurse depth-first over the realm hierarchy */
	list_for_each_entry(child, &realm->children, child_item)
		rebuild_snap_realms(child);
}
395
396
/*
 * helper to allocate and decode an array of snapids.  free prior
 * instance, if any.  on failure *dst is left NULL-able/freed, and
 * -ENOMEM is returned.
 */
static int dup_array(u64 **dst, __le64 *src, int num)
{
	int i;

	kfree(*dst);	/* kfree(NULL) is a no-op */
	if (num) {
		*dst = kcalloc(num, sizeof(u64), GFP_NOFS);
		if (!*dst)
			return -ENOMEM;
		/* src comes from a wire buffer and may be unaligned */
		for (i = 0; i < num; i++)
			(*dst)[i] = get_unaligned_le64(src + i);
	} else {
		*dst = NULL;
	}
	return 0;
}
417
418
/*
 * When a snapshot is applied, the size/mtime inode metadata is queued
 * in a ceph_cap_snap (one for each snapshot) until writeback
 * completes and the metadata can be flushed back to the MDS.
 *
 * However, if a (sync) write is currently in-progress when we apply
 * the snapshot, we have to wait until the write succeeds or fails
 * (and a final size/mtime is known).  In this case the
 * cap_snap->writing = 1, and is said to be "pending."  When the write
 * finishes, we __ceph_finish_cap_snap().
 *
 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
 * change).
 */
void ceph_queue_cap_snap(struct ceph_inode_info *ci,
			 struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap;
	int used;

	/* allocate before taking i_lock; GFP_NOFS avoids fs recursion */
	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
	if (!capsnap) {
		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
		return;
	}

	spin_lock(&inode->i_lock);
	used = __ceph_caps_used(ci);
	if (__ceph_have_pending_cap_snap(ci)) {
		/* there is no point in queuing multiple "pending" cap_snaps,
		   as no new writes are allowed to start when pending, so any
		   writes in progress now were started before the previous
		   cap_snap.  lucky us. */
		dout("queue_cap_snap %p snapc %p seq %llu used %d"
		     " already pending\n", inode, snapc, snapc->seq, used);
		kfree(capsnap);
	} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
		/* dirty buffered data and/or an in-flight write: capture
		 * current inode state under the old snap context.
		 * NOTE(review): igrab() result is unchecked here; presumably
		 * holding i_lock guarantees the inode is live -- confirm. */
		igrab(inode);

		atomic_set(&capsnap->nref, 1);
		capsnap->ci = ci;
		INIT_LIST_HEAD(&capsnap->ci_item);
		INIT_LIST_HEAD(&capsnap->flushing_item);

		/* this cap_snap covers everything up to (not including)
		 * the new snap context's seq */
		capsnap->follows = snapc->seq - 1;
		capsnap->context = ceph_get_snap_context(snapc);
		capsnap->issued = __ceph_caps_issued(ci, NULL);
		capsnap->dirty = __ceph_caps_dirty(ci);

		capsnap->mode = inode->i_mode;
		capsnap->uid = inode->i_uid;
		capsnap->gid = inode->i_gid;

		/* fixme? */
		capsnap->xattr_blob = NULL;
		capsnap->xattr_len = 0;

		/* dirty page count moved from _head to this cap_snap;
		   all subsequent writes page dirties occur _after_ this
		   snapshot. */
		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
		ci->i_wrbuffer_ref_head = 0;
		ceph_put_snap_context(ci->i_head_snapc);
		ci->i_head_snapc = NULL;
		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);

		if (used & CEPH_CAP_FILE_WR) {
			/* a sync write is in flight; final size/mtime are
			 * not yet known, so leave the cap_snap pending */
			dout("queue_cap_snap %p cap_snap %p snapc %p"
			     " seq %llu used WR, now pending\n", inode,
			     capsnap, snapc, snapc->seq);
			capsnap->writing = 1;
		} else {
			/* note mtime, size NOW. */
			__ceph_finish_cap_snap(ci, capsnap);
		}
	} else {
		dout("queue_cap_snap %p nothing dirty|writing\n", inode);
		kfree(capsnap);
	}

	spin_unlock(&inode->i_lock);
}
502
/*
 * Finalize the size, mtime for a cap_snap.. that is, settle on final values
 * to be used for the snapshot, to be flushed back to the mds.
 *
 * If capsnap can now be flushed, add to snap_flush list, and return 1.
 *
 * Caller must hold i_lock.
 */
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
			   struct ceph_cap_snap *capsnap)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;

	BUG_ON(capsnap->writing);
	/* freeze the final metadata values for this snapshot */
	capsnap->size = inode->i_size;
	capsnap->mtime = inode->i_mtime;
	capsnap->atime = inode->i_atime;
	capsnap->ctime = inode->i_ctime;
	capsnap->time_warp_seq = ci->i_time_warp_seq;
	if (capsnap->dirty_pages) {
		/* dirty pages still need writeback under this snap
		 * context before the metadata can go to the MDS */
		dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
		     "still has %d dirty pages\n", inode, capsnap,
		     capsnap->context, capsnap->context->seq,
		     capsnap->size, capsnap->dirty_pages);
		return 0;
	}
	dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
	     inode, capsnap, capsnap->context,
	     capsnap->context->seq, capsnap->size);

	/* clean: queue the inode for a snap flush */
	spin_lock(&mdsc->snap_flush_lock);
	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
	spin_unlock(&mdsc->snap_flush_lock);
	return 1;  /* caller may want to ceph_flush_snaps */
}
539
540
/*
 * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
 * the snap realm parameters from a given realm and all of its ancestors,
 * up to the root.
 *
 * Decodes one ceph_mds_snap_realm record per iteration ('more' loop)
 * until the buffer [p, e) is exhausted.  Returns 0 on success or a
 * negative errno (-EINVAL on a truncated/corrupt encoding).
 *
 * Caller must hold snap_rwsem for write.
 */
int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
			   void *p, void *e, bool deletion)
{
	struct ceph_mds_snap_realm *ri;    /* encoded */
	__le64 *snaps;                     /* encoded */
	__le64 *prior_parent_snaps;        /* encoded */
	struct ceph_snap_realm *realm;
	int invalidate = 0;
	int err = -ENOMEM;

	dout("update_snap_trace deletion=%d\n", deletion);
more:
	/* bounds-checked decode of the fixed header, then both
	 * variable-length snapid arrays */
	ceph_decode_need(&p, e, sizeof(*ri), bad);
	ri = p;
	p += sizeof(*ri);
	ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
			    le32_to_cpu(ri->num_prior_parent_snaps)), bad);
	snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
	prior_parent_snaps = p;
	p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);

	/* find or create the realm this record describes */
	realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
	if (!realm) {
		realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
		if (IS_ERR(realm)) {
			err = PTR_ERR(realm);
			goto fail;
		}
	}

	if (le64_to_cpu(ri->seq) > realm->seq) {
		dout("update_snap_trace updating %llx %p %lld -> %lld\n",
		     realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
		/*
		 * if the realm seq has changed, queue a cap_snap for every
		 * inode with open caps.  we do this _before_ we update
		 * the realm info so that we prepare for writeback under the
		 * _previous_ snap context.
		 *
		 * ...unless it's a snap deletion!
		 */
		if (!deletion) {
			struct ceph_inode_info *ci;
			struct inode *lastinode = NULL;

			/* walk the realm's inode list; drop the list lock
			 * around ceph_queue_cap_snap() and pin each inode so
			 * the current entry cannot vanish while unlocked */
			spin_lock(&realm->inodes_with_caps_lock);
			list_for_each_entry(ci, &realm->inodes_with_caps,
					    i_snap_realm_item) {
				struct inode *inode = igrab(&ci->vfs_inode);
				if (!inode)
					continue;
				spin_unlock(&realm->inodes_with_caps_lock);
				/* defer iput of the previous inode until we
				 * are outside the spinlock */
				if (lastinode)
					iput(lastinode);
				lastinode = inode;
				ceph_queue_cap_snap(ci, realm->cached_context);
				spin_lock(&realm->inodes_with_caps_lock);
			}
			spin_unlock(&realm->inodes_with_caps_lock);
			if (lastinode)
				iput(lastinode);
			dout("update_snap_trace cap_snaps queued\n");
		}

	} else {
		dout("update_snap_trace %llx %p seq %lld unchanged\n",
		     realm->ino, realm, realm->seq);
	}

	/* ensure the parent is correct */
	err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
	if (err < 0)
		goto fail;
	invalidate += err;

	if (le64_to_cpu(ri->seq) > realm->seq) {
		/* update realm parameters, snap lists */
		realm->seq = le64_to_cpu(ri->seq);
		realm->created = le64_to_cpu(ri->created);
		realm->parent_since = le64_to_cpu(ri->parent_since);

		realm->num_snaps = le32_to_cpu(ri->num_snaps);
		err = dup_array(&realm->snaps, snaps, realm->num_snaps);
		if (err < 0)
			goto fail;

		realm->num_prior_parent_snaps =
			le32_to_cpu(ri->num_prior_parent_snaps);
		err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
				realm->num_prior_parent_snaps);
		if (err < 0)
			goto fail;

		invalidate = 1;
	} else if (!realm->cached_context) {
		/* no cached context yet: must (re)build even though the
		 * seq did not advance */
		invalidate = 1;
	}

	dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
	     realm, invalidate, p, e);

	if (p < e)
		goto more;

	/* invalidate when we reach the _end_ (root) of the trace */
	if (invalidate)
		rebuild_snap_realms(realm);

	__cleanup_empty_realms(mdsc);
	return 0;

bad:
	err = -EINVAL;
fail:
	pr_err("update_snap_trace error %d\n", err);
	return err;
}
666
667
/*
 * Send any cap_snaps that are queued for flush.  Try to carry
 * s_mutex across multiple snap flushes to avoid locking overhead.
 *
 * Caller holds no locks.
 */
static void flush_snaps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;
	struct ceph_mds_session *session = NULL;

	dout("flush_snaps\n");
	spin_lock(&mdsc->snap_flush_lock);
	while (!list_empty(&mdsc->snap_flush_list)) {
		ci = list_first_entry(&mdsc->snap_flush_list,
				struct ceph_inode_info, i_snap_flush_item);
		inode = &ci->vfs_inode;
		/* NOTE(review): igrab() return value is unchecked; if the
		 * inode were already being freed we would operate on a dying
		 * inode -- confirm that list membership pins the inode. */
		igrab(inode);
		/* drop the flush-list lock before taking i_lock */
		spin_unlock(&mdsc->snap_flush_lock);
		spin_lock(&inode->i_lock);
		/* *session may be set and kept locked here, so the
		 * next iteration can reuse it (see cleanup below) */
		__ceph_flush_snaps(ci, &session);
		spin_unlock(&inode->i_lock);
		iput(inode);
		spin_lock(&mdsc->snap_flush_lock);
	}
	spin_unlock(&mdsc->snap_flush_lock);

	/* release the session carried across iterations, if any */
	if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	dout("flush_snaps done\n");
}
702
703
704/*
705 * Handle a snap notification from the MDS.
706 *
707 * This can take two basic forms: the simplest is just a snap creation
708 * or deletion notification on an existing realm. This should update the
709 * realm and its children.
710 *
711 * The more difficult case is realm creation, due to snap creation at a
712 * new point in the file hierarchy, or due to a rename that moves a file or
713 * directory into another realm.
714 */
715void ceph_handle_snap(struct ceph_mds_client *mdsc,
716 struct ceph_mds_session *session,
717 struct ceph_msg *msg)
718{
719 struct super_block *sb = mdsc->client->sb;
720 int mds = session->s_mds;
721 u64 split;
722 int op;
723 int trace_len;
724 struct ceph_snap_realm *realm = NULL;
725 void *p = msg->front.iov_base;
726 void *e = p + msg->front.iov_len;
727 struct ceph_mds_snap_head *h;
728 int num_split_inos, num_split_realms;
729 __le64 *split_inos = NULL, *split_realms = NULL;
730 int i;
731 int locked_rwsem = 0;
732
733 /* decode */
734 if (msg->front.iov_len < sizeof(*h))
735 goto bad;
736 h = p;
737 op = le32_to_cpu(h->op);
738 split = le64_to_cpu(h->split); /* non-zero if we are splitting an
739 * existing realm */
740 num_split_inos = le32_to_cpu(h->num_split_inos);
741 num_split_realms = le32_to_cpu(h->num_split_realms);
742 trace_len = le32_to_cpu(h->trace_len);
743 p += sizeof(*h);
744
745 dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
746 ceph_snap_op_name(op), split, trace_len);
747
748 mutex_lock(&session->s_mutex);
749 session->s_seq++;
750 mutex_unlock(&session->s_mutex);
751
752 down_write(&mdsc->snap_rwsem);
753 locked_rwsem = 1;
754
755 if (op == CEPH_SNAP_OP_SPLIT) {
756 struct ceph_mds_snap_realm *ri;
757
758 /*
759 * A "split" breaks part of an existing realm off into
760 * a new realm. The MDS provides a list of inodes
761 * (with caps) and child realms that belong to the new
762 * child.
763 */
764 split_inos = p;
765 p += sizeof(u64) * num_split_inos;
766 split_realms = p;
767 p += sizeof(u64) * num_split_realms;
768 ceph_decode_need(&p, e, sizeof(*ri), bad);
769 /* we will peek at realm info here, but will _not_
770 * advance p, as the realm update will occur below in
771 * ceph_update_snap_trace. */
772 ri = p;
773
774 realm = ceph_lookup_snap_realm(mdsc, split);
775 if (!realm) {
776 realm = ceph_create_snap_realm(mdsc, split);
777 if (IS_ERR(realm))
778 goto out;
779 }
780 ceph_get_snap_realm(mdsc, realm);
781
782 dout("splitting snap_realm %llx %p\n", realm->ino, realm);
783 for (i = 0; i < num_split_inos; i++) {
784 struct ceph_vino vino = {
785 .ino = le64_to_cpu(split_inos[i]),
786 .snap = CEPH_NOSNAP,
787 };
788 struct inode *inode = ceph_find_inode(sb, vino);
789 struct ceph_inode_info *ci;
790
791 if (!inode)
792 continue;
793 ci = ceph_inode(inode);
794
795 spin_lock(&inode->i_lock);
796 if (!ci->i_snap_realm)
797 goto skip_inode;
798 /*
799 * If this inode belongs to a realm that was
800 * created after our new realm, we experienced
801 * a race (due to another split notifications
802 * arriving from a different MDS). So skip
803 * this inode.
804 */
805 if (ci->i_snap_realm->created >
806 le64_to_cpu(ri->created)) {
807 dout(" leaving %p in newer realm %llx %p\n",
808 inode, ci->i_snap_realm->ino,
809 ci->i_snap_realm);
810 goto skip_inode;
811 }
812 dout(" will move %p to split realm %llx %p\n",
813 inode, realm->ino, realm);
814 /*
815 * Remove the inode from the realm's inode
816 * list, but don't add it to the new realm
817 * yet. We don't want the cap_snap to be
818 * queued (again) by ceph_update_snap_trace()
819 * below. Queue it _now_, under the old context.
820 */
821 list_del_init(&ci->i_snap_realm_item);
822 spin_unlock(&inode->i_lock);
823
824 ceph_queue_cap_snap(ci,
825 ci->i_snap_realm->cached_context);
826
827 iput(inode);
828 continue;
829
830skip_inode:
831 spin_unlock(&inode->i_lock);
832 iput(inode);
833 }
834
835 /* we may have taken some of the old realm's children. */
836 for (i = 0; i < num_split_realms; i++) {
837 struct ceph_snap_realm *child =
838 ceph_lookup_snap_realm(mdsc,
839 le64_to_cpu(split_realms[i]));
840 if (!child)
841 continue;
842 adjust_snap_realm_parent(mdsc, child, realm->ino);
843 }
844 }
845
846 /*
847 * update using the provided snap trace. if we are deleting a
848 * snap, we can avoid queueing cap_snaps.
849 */
850 ceph_update_snap_trace(mdsc, p, e,
851 op == CEPH_SNAP_OP_DESTROY);
852
853 if (op == CEPH_SNAP_OP_SPLIT) {
854 /*
855 * ok, _now_ add the inodes into the new realm.
856 */
857 for (i = 0; i < num_split_inos; i++) {
858 struct ceph_vino vino = {
859 .ino = le64_to_cpu(split_inos[i]),
860 .snap = CEPH_NOSNAP,
861 };
862 struct inode *inode = ceph_find_inode(sb, vino);
863 struct ceph_inode_info *ci;
864
865 if (!inode)
866 continue;
867 ci = ceph_inode(inode);
868 spin_lock(&inode->i_lock);
869 if (!ci->i_snap_realm)
870 goto split_skip_inode;
871 ceph_put_snap_realm(mdsc, ci->i_snap_realm);
872 spin_lock(&realm->inodes_with_caps_lock);
873 list_add(&ci->i_snap_realm_item,
874 &realm->inodes_with_caps);
875 ci->i_snap_realm = realm;
876 spin_unlock(&realm->inodes_with_caps_lock);
877 ceph_get_snap_realm(mdsc, realm);
878split_skip_inode:
879 spin_unlock(&inode->i_lock);
880 iput(inode);
881 }
882
883 /* we took a reference when we created the realm, above */
884 ceph_put_snap_realm(mdsc, realm);
885 }
886
887 __cleanup_empty_realms(mdsc);
888
889 up_write(&mdsc->snap_rwsem);
890
891 flush_snaps(mdsc);
892 return;
893
894bad:
895 pr_err("corrupt snap message from mds%d\n", mds);
896 ceph_msg_dump(msg);
897out:
898 if (locked_rwsem)
899 up_write(&mdsc->snap_rwsem);
900 return;
901}
902
903
904
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
new file mode 100644
index 000000000000..4290a6e860b0
--- /dev/null
+++ b/fs/ceph/super.c
@@ -0,0 +1,1030 @@
1
2#include "ceph_debug.h"
3
4#include <linux/backing-dev.h>
5#include <linux/fs.h>
6#include <linux/inet.h>
7#include <linux/in6.h>
8#include <linux/module.h>
9#include <linux/mount.h>
10#include <linux/parser.h>
11#include <linux/rwsem.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/statfs.h>
15#include <linux/string.h>
16#include <linux/version.h>
17#include <linux/vmalloc.h>
18
19#include "decode.h"
20#include "super.h"
21#include "mon_client.h"
22#include "auth.h"
23
24/*
25 * Ceph superblock operations
26 *
27 * Handle the basics of mounting, unmounting.
28 */
29
30
/*
 * Return a pointer to the filename portion of a path
 * (e.g. "/foo/bar/baz" -> "baz").  @len is the length of @s;
 * the result points into @s (no allocation).
 */
const char *ceph_file_part(const char *s, int len)
{
	const char *p = s + len;

	while (p > s && p[-1] != '/')
		p--;
	return p;
}
42
43
44/*
45 * super ops
46 */
47static void ceph_put_super(struct super_block *s)
48{
49 struct ceph_client *cl = ceph_client(s);
50
51 dout("put_super\n");
52 ceph_mdsc_close_sessions(&cl->mdsc);
53 return;
54}
55
56static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
57{
58 struct ceph_client *client = ceph_inode_to_client(dentry->d_inode);
59 struct ceph_monmap *monmap = client->monc.monmap;
60 struct ceph_statfs st;
61 u64 fsid;
62 int err;
63
64 dout("statfs\n");
65 err = ceph_monc_do_statfs(&client->monc, &st);
66 if (err < 0)
67 return err;
68
69 /* fill in kstatfs */
70 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
71
72 /*
73 * express utilization in terms of large blocks to avoid
74 * overflow on 32-bit machines.
75 */
76 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
77 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
78 buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >>
79 (CEPH_BLOCK_SHIFT-10);
80 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
81
82 buf->f_files = le64_to_cpu(st.num_objects);
83 buf->f_ffree = -1;
84 buf->f_namelen = PATH_MAX;
85 buf->f_frsize = PAGE_CACHE_SIZE;
86
87 /* leave fsid little-endian, regardless of host endianness */
88 fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
89 buf->f_fsid.val[0] = fsid & 0xffffffff;
90 buf->f_fsid.val[1] = fsid >> 32;
91
92 return 0;
93}
94
95
96static int ceph_syncfs(struct super_block *sb, int wait)
97{
98 dout("sync_fs %d\n", wait);
99 ceph_osdc_sync(&ceph_client(sb)->osdc);
100 ceph_mdsc_sync(&ceph_client(sb)->mdsc);
101 dout("sync_fs %d done\n", wait);
102 return 0;
103}
104
105
106/**
107 * ceph_show_options - Show mount options in /proc/mounts
108 * @m: seq_file to write to
109 * @mnt: mount descriptor
110 */
111static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
112{
113 struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
114 struct ceph_mount_args *args = client->mount_args;
115
116 if (args->flags & CEPH_OPT_FSID)
117 seq_printf(m, ",fsidmajor=%llu,fsidminor%llu",
118 le64_to_cpu(*(__le64 *)&args->fsid.fsid[0]),
119 le64_to_cpu(*(__le64 *)&args->fsid.fsid[8]));
120 if (args->flags & CEPH_OPT_NOSHARE)
121 seq_puts(m, ",noshare");
122 if (args->flags & CEPH_OPT_DIRSTAT)
123 seq_puts(m, ",dirstat");
124 if ((args->flags & CEPH_OPT_RBYTES) == 0)
125 seq_puts(m, ",norbytes");
126 if (args->flags & CEPH_OPT_NOCRC)
127 seq_puts(m, ",nocrc");
128 if (args->flags & CEPH_OPT_NOASYNCREADDIR)
129 seq_puts(m, ",noasyncreaddir");
130 if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
131 seq_printf(m, ",snapdirname=%s", args->snapdir_name);
132 if (args->name)
133 seq_printf(m, ",name=%s", args->name);
134 if (args->secret)
135 seq_puts(m, ",secret=<hidden>");
136 return 0;
137}
138
/*
 * slab caches for ceph's per-inode info, capabilities, dentry info,
 * and open-file state (created in init_caches, freed in destroy_caches)
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
146
147static void ceph_inode_init_once(void *foo)
148{
149 struct ceph_inode_info *ci = foo;
150 inode_init_once(&ci->vfs_inode);
151}
152
153static int default_congestion_kb(void)
154{
155 int congestion_kb;
156
157 /*
158 * Copied from NFS
159 *
160 * congestion size, scale with available memory.
161 *
162 * 64MB: 8192k
163 * 128MB: 11585k
164 * 256MB: 16384k
165 * 512MB: 23170k
166 * 1GB: 32768k
167 * 2GB: 46340k
168 * 4GB: 65536k
169 * 8GB: 92681k
170 * 16GB: 131072k
171 *
172 * This allows larger machines to have larger/more transfers.
173 * Limit the default to 256M
174 */
175 congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
176 if (congestion_kb > 256*1024)
177 congestion_kb = 256*1024;
178
179 return congestion_kb;
180}
181
/*
 * Create all four ceph slab caches.  On any failure, previously
 * created caches are destroyed in reverse order and -ENOMEM is
 * returned; on success returns 0.
 */
static int __init init_caches(void)
{
	/* inode cache needs an explicit constructor for the VFS inode */
	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
				      ceph_inode_init_once);
	if (ceph_inode_cachep == NULL)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap,
				     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_cap_cachep == NULL)
		goto bad_cap;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_dentry_cachep == NULL)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info,
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_file_cachep == NULL)
		goto bad_file;

	return 0;

	/* unwind in reverse creation order */
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return -ENOMEM;
}
217
218static void destroy_caches(void)
219{
220 kmem_cache_destroy(ceph_inode_cachep);
221 kmem_cache_destroy(ceph_cap_cachep);
222 kmem_cache_destroy(ceph_dentry_cachep);
223 kmem_cache_destroy(ceph_file_cachep);
224}
225
226
227/*
228 * ceph_umount_begin - initiate forced umount. Tear down down the
229 * mount, skipping steps that may hang while waiting for server(s).
230 */
231static void ceph_umount_begin(struct super_block *sb)
232{
233 struct ceph_client *client = ceph_sb_to_client(sb);
234
235 dout("ceph_umount_begin - starting forced umount\n");
236 if (!client)
237 return;
238 client->mount_state = CEPH_MOUNT_SHUTDOWN;
239 return;
240}
241
/* VFS superblock operations implemented by ceph */
static const struct super_operations ceph_super_ops = {
	.alloc_inode = ceph_alloc_inode,
	.destroy_inode = ceph_destroy_inode,
	.write_inode = ceph_write_inode,
	.sync_fs = ceph_syncfs,
	.put_super = ceph_put_super,
	.show_options = ceph_show_options,
	.statfs = ceph_statfs,
	.umount_begin = ceph_umount_begin,
};
252
253
/*
 * Map a ceph message type code to a human-readable name for debug
 * output; returns "unknown" for unrecognized types.
 */
const char *ceph_msg_type_name(int type)
{
	switch (type) {
	case CEPH_MSG_SHUTDOWN: return "shutdown";
	case CEPH_MSG_PING: return "ping";
	case CEPH_MSG_AUTH: return "auth";
	case CEPH_MSG_AUTH_REPLY: return "auth_reply";
	case CEPH_MSG_MON_MAP: return "mon_map";
	case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
	case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
	case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
	case CEPH_MSG_STATFS: return "statfs";
	case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
	case CEPH_MSG_MDS_MAP: return "mds_map";
	case CEPH_MSG_CLIENT_SESSION: return "client_session";
	case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
	case CEPH_MSG_CLIENT_REQUEST: return "client_request";
	case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
	case CEPH_MSG_CLIENT_REPLY: return "client_reply";
	case CEPH_MSG_CLIENT_CAPS: return "client_caps";
	case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
	case CEPH_MSG_CLIENT_SNAP: return "client_snap";
	case CEPH_MSG_CLIENT_LEASE: return "client_lease";
	case CEPH_MSG_OSD_MAP: return "osd_map";
	case CEPH_MSG_OSD_OP: return "osd_op";
	case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
	default: return "unknown";
	}
}
283
284
/*
 * mount option tokens.
 *
 * Ordering is significant: tokens before Opt_last_int take an integer
 * argument, tokens between Opt_last_int and Opt_last_string take a
 * string argument, and the remainder are bare flags (see the dispatch
 * in parse_mount_args).
 */
enum {
	Opt_fsidmajor,
	Opt_fsidminor,
	Opt_monport,
	Opt_wsize,
	Opt_rsize,
	Opt_osdtimeout,
	Opt_osdkeepalivetimeout,
	Opt_mount_timeout,
	Opt_osd_idle_ttl,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_name,
	Opt_secret,
	Opt_last_string,
	/* string args above */
	Opt_ip,
	Opt_noshare,
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_nocrc,
	Opt_noasyncreaddir,
};
318
/*
 * match_token() patterns for the option tokens above; must stay in
 * sync with the enum ordering relative to Opt_last_int/Opt_last_string.
 * NOTE(review): fsidmajor/fsidminor use %ld patterns but are parsed
 * with match_int() (int-sized) in parse_mount_args -- confirm intended.
 */
static match_table_t arg_tokens = {
	{Opt_fsidmajor, "fsidmajor=%ld"},
	{Opt_fsidminor, "fsidminor=%ld"},
	{Opt_monport, "monport=%d"},
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_osdtimeout, "osdtimeout=%d"},
	{Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
	{Opt_mount_timeout, "mount_timeout=%d"},
	{Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_name, "name=%s"},
	{Opt_secret, "secret=%s"},
	/* string args above */
	{Opt_ip, "ip=%s"},
	{Opt_noshare, "noshare"},
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_nocrc, "nocrc"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{-1, NULL}
};
348
349
350static struct ceph_mount_args *parse_mount_args(int flags, char *options,
351 const char *dev_name,
352 const char **path)
353{
354 struct ceph_mount_args *args;
355 const char *c;
356 int err = -ENOMEM;
357 substring_t argstr[MAX_OPT_ARGS];
358
359 args = kzalloc(sizeof(*args), GFP_KERNEL);
360 if (!args)
361 return ERR_PTR(-ENOMEM);
362 args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr),
363 GFP_KERNEL);
364 if (!args->mon_addr)
365 goto out;
366
367 dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name);
368
369 /* start with defaults */
370 args->sb_flags = flags;
371 args->flags = CEPH_OPT_DEFAULT;
372 args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT;
373 args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT;
374 args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
375 args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
376 args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
377 args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
378 args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
379 args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
380 args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4;
381 args->max_readdir = 1024;
382 args->congestion_kb = default_congestion_kb();
383
384 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
385 err = -EINVAL;
386 if (!dev_name)
387 goto out;
388 *path = strstr(dev_name, ":/");
389 if (*path == NULL) {
390 pr_err("device name is missing path (no :/ in %s)\n",
391 dev_name);
392 goto out;
393 }
394
395 /* get mon ip(s) */
396 err = ceph_parse_ips(dev_name, *path, args->mon_addr,
397 CEPH_MAX_MON, &args->num_mon);
398 if (err < 0)
399 goto out;
400
401 /* path on server */
402 *path += 2;
403 dout("server path '%s'\n", *path);
404
405 /* parse mount options */
406 while ((c = strsep(&options, ",")) != NULL) {
407 int token, intval, ret;
408 if (!*c)
409 continue;
410 err = -EINVAL;
411 token = match_token((char *)c, arg_tokens, argstr);
412 if (token < 0) {
413 pr_err("bad mount option at '%s'\n", c);
414 goto out;
415 }
416 if (token < Opt_last_int) {
417 ret = match_int(&argstr[0], &intval);
418 if (ret < 0) {
419 pr_err("bad mount option arg (not int) "
420 "at '%s'\n", c);
421 continue;
422 }
423 dout("got int token %d val %d\n", token, intval);
424 } else if (token > Opt_last_int && token < Opt_last_string) {
425 dout("got string token %d val %s\n", token,
426 argstr[0].from);
427 } else {
428 dout("got token %d\n", token);
429 }
430 switch (token) {
431 case Opt_fsidmajor:
432 *(__le64 *)&args->fsid.fsid[0] = cpu_to_le64(intval);
433 break;
434 case Opt_fsidminor:
435 *(__le64 *)&args->fsid.fsid[8] = cpu_to_le64(intval);
436 break;
437 case Opt_ip:
438 err = ceph_parse_ips(argstr[0].from,
439 argstr[0].to,
440 &args->my_addr,
441 1, NULL);
442 if (err < 0)
443 goto out;
444 args->flags |= CEPH_OPT_MYIP;
445 break;
446
447 case Opt_snapdirname:
448 kfree(args->snapdir_name);
449 args->snapdir_name = kstrndup(argstr[0].from,
450 argstr[0].to-argstr[0].from,
451 GFP_KERNEL);
452 break;
453 case Opt_name:
454 args->name = kstrndup(argstr[0].from,
455 argstr[0].to-argstr[0].from,
456 GFP_KERNEL);
457 break;
458 case Opt_secret:
459 args->secret = kstrndup(argstr[0].from,
460 argstr[0].to-argstr[0].from,
461 GFP_KERNEL);
462 break;
463
464 /* misc */
465 case Opt_wsize:
466 args->wsize = intval;
467 break;
468 case Opt_rsize:
469 args->rsize = intval;
470 break;
471 case Opt_osdtimeout:
472 args->osd_timeout = intval;
473 break;
474 case Opt_osdkeepalivetimeout:
475 args->osd_keepalive_timeout = intval;
476 break;
477 case Opt_mount_timeout:
478 args->mount_timeout = intval;
479 break;
480 case Opt_caps_wanted_delay_min:
481 args->caps_wanted_delay_min = intval;
482 break;
483 case Opt_caps_wanted_delay_max:
484 args->caps_wanted_delay_max = intval;
485 break;
486 case Opt_readdir_max_entries:
487 args->max_readdir = intval;
488 break;
489 case Opt_congestion_kb:
490 args->congestion_kb = intval;
491 break;
492
493 case Opt_noshare:
494 args->flags |= CEPH_OPT_NOSHARE;
495 break;
496
497 case Opt_dirstat:
498 args->flags |= CEPH_OPT_DIRSTAT;
499 break;
500 case Opt_nodirstat:
501 args->flags &= ~CEPH_OPT_DIRSTAT;
502 break;
503 case Opt_rbytes:
504 args->flags |= CEPH_OPT_RBYTES;
505 break;
506 case Opt_norbytes:
507 args->flags &= ~CEPH_OPT_RBYTES;
508 break;
509 case Opt_nocrc:
510 args->flags |= CEPH_OPT_NOCRC;
511 break;
512 case Opt_noasyncreaddir:
513 args->flags |= CEPH_OPT_NOASYNCREADDIR;
514 break;
515
516 default:
517 BUG_ON(token);
518 }
519 }
520 return args;
521
522out:
523 kfree(args->mon_addr);
524 kfree(args);
525 return ERR_PTR(err);
526}
527
/*
 * Free a ceph_mount_args and all strings it owns.  Fields are NULLed
 * before the struct itself is freed (defensive against stale use).
 */
static void destroy_mount_args(struct ceph_mount_args *args)
{
	dout("destroy_mount_args %p\n", args);
	kfree(args->snapdir_name);
	args->snapdir_name = NULL;
	kfree(args->name);
	args->name = NULL;
	kfree(args->secret);
	args->secret = NULL;
	kfree(args);
}
539
/*
 * create a fresh client instance
 *
 * Takes ownership of @args (freed later by ceph_destroy_client via
 * destroy_mount_args).  Acquires resources in order -- bdi, three
 * workqueues, pagevec mempool, monitor/OSD/MDS subsystems -- and
 * unwinds them in reverse via the goto chain on failure.  Returns the
 * client or an ERR_PTR.
 */
static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
{
	struct ceph_client *client;
	int err = -ENOMEM;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&client->mount_mutex);

	init_waitqueue_head(&client->auth_wq);

	client->sb = NULL;
	client->mount_state = CEPH_MOUNT_MOUNTING;
	client->mount_args = args;

	client->msgr = NULL;

	client->auth_err = 0;
	atomic_long_set(&client->writeback_count, 0);

	err = bdi_init(&client->backing_dev_info);
	if (err < 0)
		goto fail;

	err = -ENOMEM;
	client->wb_wq = create_workqueue("ceph-writeback");
	if (client->wb_wq == NULL)
		goto fail_bdi;
	client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
	if (client->pg_inv_wq == NULL)
		goto fail_wb_wq;
	client->trunc_wq = create_singlethread_workqueue("ceph-trunc");
	if (client->trunc_wq == NULL)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	/* NOTE(review): wsize >> PAGE_CACHE_SHIFT is 0 when wsize was not
	 * set on the command line (parse_mount_args leaves it 0) -- confirm
	 * mempool_create_kmalloc_pool tolerates a zero size here. */
	client->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
			      client->mount_args->wsize >> PAGE_CACHE_SHIFT);
	if (!client->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* caps */
	client->min_caps = args->max_readdir;
	ceph_adjust_min_caps(client->min_caps);

	/* subsystems */
	err = ceph_monc_init(&client->monc, client);
	if (err < 0)
		goto fail_mempool;
	err = ceph_osdc_init(&client->osdc, client);
	if (err < 0)
		goto fail_monc;
	err = ceph_mdsc_init(&client->mdsc, client);
	if (err < 0)
		goto fail_osdc;
	return client;

	/* unwind in reverse order of acquisition */
fail_osdc:
	ceph_osdc_stop(&client->osdc);
fail_monc:
	ceph_monc_stop(&client->monc);
fail_mempool:
	mempool_destroy(client->wb_pagevec_pool);
fail_trunc_wq:
	destroy_workqueue(client->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(client->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(client->wb_wq);
fail_bdi:
	bdi_destroy(&client->backing_dev_info);
fail:
	kfree(client);
	return ERR_PTR(err);
}
621
/*
 * Tear down a client created by ceph_create_client: stop the MDS,
 * monitor, and OSD subsystems, release debugfs/workqueues/messenger/
 * mempool, free the mount args it owns, and finally the client itself.
 */
static void ceph_destroy_client(struct ceph_client *client)
{
	dout("destroy_client %p\n", client);

	/* unmount */
	ceph_mdsc_stop(&client->mdsc);
	ceph_monc_stop(&client->monc);
	ceph_osdc_stop(&client->osdc);

	/* undo the reservation made in ceph_create_client */
	ceph_adjust_min_caps(-client->min_caps);

	ceph_debugfs_client_cleanup(client);
	destroy_workqueue(client->wb_wq);
	destroy_workqueue(client->pg_inv_wq);
	destroy_workqueue(client->trunc_wq);

	/* msgr is only present after a successful mount */
	if (client->msgr)
		ceph_messenger_destroy(client->msgr);
	mempool_destroy(client->wb_pagevec_pool);

	destroy_mount_args(client->mount_args);

	kfree(client);
	dout("destroy_client %p done\n", client);
}
647
648/*
649 * Initially learn our fsid, or verify an fsid matches.
650 */
651int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
652{
653 if (client->have_fsid) {
654 if (ceph_fsid_compare(&client->fsid, fsid)) {
655 pr_err("bad fsid, had " FSID_FORMAT " got " FSID_FORMAT,
656 PR_FSID(&client->fsid), PR_FSID(fsid));
657 return -1;
658 }
659 } else {
660 pr_info("client%lld fsid " FSID_FORMAT "\n",
661 client->monc.auth->global_id, PR_FSID(fsid));
662 memcpy(&client->fsid, fsid, sizeof(*fsid));
663 ceph_debugfs_client_init(client);
664 client->have_fsid = true;
665 }
666 return 0;
667}
668
669/*
670 * true if we have the mon map (and have thus joined the cluster)
671 */
672static int have_mon_map(struct ceph_client *client)
673{
674 return client->monc.monmap && client->monc.monmap->epoch;
675}
676
677/*
678 * Bootstrap mount by opening the root directory. Note the mount
679 * @started time from caller, and time out if this takes too long.
680 */
681static struct dentry *open_root_dentry(struct ceph_client *client,
682 const char *path,
683 unsigned long started)
684{
685 struct ceph_mds_client *mdsc = &client->mdsc;
686 struct ceph_mds_request *req = NULL;
687 int err;
688 struct dentry *root;
689
690 /* open dir */
691 dout("open_root_inode opening '%s'\n", path);
692 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
693 if (IS_ERR(req))
694 return ERR_PTR(PTR_ERR(req));
695 req->r_path1 = kstrdup(path, GFP_NOFS);
696 req->r_ino1.ino = CEPH_INO_ROOT;
697 req->r_ino1.snap = CEPH_NOSNAP;
698 req->r_started = started;
699 req->r_timeout = client->mount_args->mount_timeout * HZ;
700 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
701 req->r_num_caps = 2;
702 err = ceph_mdsc_do_request(mdsc, NULL, req);
703 if (err == 0) {
704 dout("open_root_inode success\n");
705 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
706 client->sb->s_root == NULL)
707 root = d_alloc_root(req->r_target_inode);
708 else
709 root = d_obtain_alias(req->r_target_inode);
710 req->r_target_inode = NULL;
711 dout("open_root_inode success, root dentry is %p\n", root);
712 } else {
713 root = ERR_PTR(err);
714 }
715 ceph_mdsc_put_request(req);
716 return root;
717}
718
/*
 * mount: join the ceph cluster, and open root directory.
 *
 * Serialized by client->mount_mutex.  The messenger is created
 * lazily on the first attempt so a failed mount can be retried.
 * We wait for the mon map (bounded by the mount_timeout option),
 * then open the root dentry and, if a subpath was given, the base
 * mountpoint dentry as well.
 */
static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
		      const char *path)
{
	struct ceph_entity_addr *myaddr = NULL;
	int err;
	unsigned long timeout = client->mount_args->mount_timeout * HZ;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start\n");
	mutex_lock(&client->mount_mutex);

	/* initialize the messenger */
	if (client->msgr == NULL) {
		if (ceph_test_opt(client, MYIP))
			myaddr = &client->mount_args->my_addr;
		client->msgr = ceph_messenger_create(myaddr);
		if (IS_ERR(client->msgr)) {
			err = PTR_ERR(client->msgr);
			/* reset so ceph_destroy_client() won't touch it */
			client->msgr = NULL;
			goto out;
		}
		client->msgr->nocrc = ceph_test_opt(client, NOCRC);
	}

	/* open session, and wait for mon, mds, and osd maps */
	err = ceph_monc_open_session(&client->monc);
	if (err < 0)
		goto out;

	/* wait (up to mount_timeout) until the mon map arrives */
	while (!have_mon_map(client)) {
		err = -EIO;
		if (timeout && time_after_eq(jiffies, started + timeout))
			goto out;

		/* wait */
		dout("mount waiting for mon_map\n");
		err = wait_event_interruptible_timeout(client->auth_wq,
		       have_mon_map(client) || (client->auth_err < 0),
		       timeout);
		if (err == -EINTR || err == -ERESTARTSYS)
			goto out;
		if (client->auth_err < 0) {
			/* monitor authentication failed; propagate */
			err = client->auth_err;
			goto out;
		}
	}

	dout("mount opening root\n");
	root = open_root_dentry(client, "", started);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}
	/* a shared (pre-existing) sb already has s_root set */
	if (client->sb->s_root)
		dput(root);
	else
		client->sb->s_root = root;

	if (path[0] == 0) {
		/* mounting "/": take another ref on the root dentry */
		dget(root);
	} else {
		dout("mount opening base mountpoint\n");
		root = open_root_dentry(client, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			dput(client->sb->s_root);
			client->sb->s_root = NULL;
			goto out;
		}
	}

	/* either branch above left us holding a reference in root */
	mnt->mnt_root = root;
	mnt->mnt_sb = client->sb;

	client->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	err = 0;

out:
	mutex_unlock(&client->mount_mutex);
	return err;
}
805
806static int ceph_set_super(struct super_block *s, void *data)
807{
808 struct ceph_client *client = data;
809 int ret;
810
811 dout("set_super %p data %p\n", s, data);
812
813 s->s_flags = client->mount_args->sb_flags;
814 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
815
816 s->s_fs_info = client;
817 client->sb = s;
818
819 s->s_op = &ceph_super_ops;
820 s->s_export_op = &ceph_export_ops;
821
822 s->s_time_gran = 1000; /* 1000 ns == 1 us */
823
824 ret = set_anon_super(s, NULL); /* what is that second arg for? */
825 if (ret != 0)
826 goto fail;
827
828 return ret;
829
830fail:
831 s->s_fs_info = NULL;
832 client->sb = NULL;
833 return ret;
834}
835
/*
 * share superblock if same fs AND options
 *
 * sget() 'compare' callback: returns 1 if the existing sb and the
 * prospective client refer to the same cluster with the same flags.
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_client *new = data;
	struct ceph_mount_args *args = new->mount_args;
	struct ceph_client *other = ceph_sb_to_client(sb);
	int i;

	dout("ceph_compare_super %p\n", sb);
	/* if an explicit fsid was given, match on it; otherwise require
	 * at least one monitor in common with the existing client */
	if (args->flags & CEPH_OPT_FSID) {
		if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
			dout("fsid doesn't match\n");
			return 0;
		}
	} else {
		/* do we share (a) monitor? */
		for (i = 0; i < new->monc.monmap->num_mon; i++)
			if (ceph_monmap_contains(other->monc.monmap,
					 &new->monc.monmap->mon_inst[i].addr))
				break;
		if (i == new->monc.monmap->num_mon) {
			dout("mon ip not part of monmap\n");
			return 0;
		}
		dout("mon ip matches existing sb %p\n", sb);
	}
	/* sb-level mount flags must match too */
	if (args->sb_flags != other->mount_args->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}
870
871/*
872 * construct our own bdi so we can control readahead, etc.
873 */
874static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
875{
876 int err;
877
878 sb->s_bdi = &client->backing_dev_info;
879
880 /* set ra_pages based on rsize mount option? */
881 if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
882 client->backing_dev_info.ra_pages =
883 (client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
884 >> PAGE_SHIFT;
885 err = bdi_register_dev(&client->backing_dev_info, sb->s_dev);
886 return err;
887}
888
/*
 * Mount entry point: parse options, create (or share) a client, get a
 * superblock via sget(), and perform the actual mount.
 */
static int ceph_get_sb(struct file_system_type *fs_type,
		       int flags, const char *dev_name, void *data,
		       struct vfsmount *mnt)
{
	struct super_block *sb;
	struct ceph_client *client;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	const char *path = NULL;
	struct ceph_mount_args *args;

	dout("ceph_get_sb\n");
	args = parse_mount_args(flags, data, dev_name, &path);
	if (IS_ERR(args)) {
		err = PTR_ERR(args);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	client = ceph_create_client(args);
	if (IS_ERR(client)) {
		err = PTR_ERR(client);
		goto out_final;
	}

	/* a NULL compare callback tells sget() never to share an sb */
	if (client->mount_args->flags & CEPH_OPT_NOSHARE)
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, client);
	if (IS_ERR(sb)) {
		err = PTR_ERR(sb);
		goto out;
	}

	if (ceph_client(sb) != client) {
		/* sget() matched an existing sb: drop the client we just
		 * made and use the one already attached to the sb */
		ceph_destroy_client(client);
		client = ceph_client(sb);
		dout("get_sb got existing client %p\n", client);
	} else {
		dout("get_sb using new client %p\n", client);
		err = ceph_register_bdi(sb, client);
		if (err < 0)
			goto out_splat;
	}

	err = ceph_mount(client, mnt, path);
	if (err < 0)
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
	     mnt->mnt_root->d_inode, ceph_vinop(mnt->mnt_root->d_inode));
	return 0;

out_splat:
	/* the sb is live here: deactivating it runs ceph_kill_sb(),
	 * which also destroys the client */
	ceph_mdsc_close_sessions(&client->mdsc);
	up_write(&sb->s_umount);
	deactivate_super(sb);
	goto out_final;

out:
	/* no sb was attached; destroy the client directly */
	ceph_destroy_client(client);
out_final:
	dout("ceph_get_sb fail %d\n", err);
	return err;
}
952
/*
 * Unmount: warn the MDS before the generic teardown, then release the
 * bdi and destroy the client.
 */
static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_client *client = ceph_sb_to_client(s);
	dout("kill_sb %p\n", s);
	/* tell the MDS we're going away before the sb goes r/o */
	ceph_mdsc_pre_umount(&client->mdsc);
	kill_anon_super(s);    /* will call put_super after sb is r/o */
	/* s_bdi may not be ours if ceph_register_bdi() never ran
	 * (shared-client path in ceph_get_sb()) */
	if (s->s_bdi == &client->backing_dev_info)
		bdi_unregister(&client->backing_dev_info);
	bdi_destroy(&client->backing_dev_info);
	ceph_destroy_client(client);
}
964
/*
 * Filesystem registration table.  FS_RENAME_DOES_D_MOVE tells the VFS
 * that the filesystem performs d_move() during rename itself.
 */
static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.get_sb		= ceph_get_sb,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
972
/* two-level expand-then-stringify helpers (standard cpp idiom) */
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
975
/*
 * Module init: set up debugfs, the messenger, slab caches and cap
 * accounting, then register the filesystem.  Unwinds in reverse
 * order on failure.
 */
static int __init init_ceph(void)
{
	int ret = 0;

	ret = ceph_debugfs_init();
	if (ret < 0)
		goto out;

	ret = ceph_msgr_init();
	if (ret < 0)
		goto out_debugfs;

	ret = init_caches();
	if (ret)
		goto out_msgr;

	ceph_caps_init();

	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_icache;

	pr_info("loaded %d.%d.%d (mon/mds/osd proto %d/%d/%d)\n",
		CEPH_VERSION_MAJOR, CEPH_VERSION_MINOR, CEPH_VERSION_PATCH,
		CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL);
	return 0;

	/* NOTE(review): ceph_caps_init() has no matching
	 * ceph_caps_finalize() on this error path -- verify whether a
	 * register_filesystem() failure leaks cap reservations */
out_icache:
	destroy_caches();
out_msgr:
	ceph_msgr_exit();
out_debugfs:
	ceph_debugfs_cleanup();
out:
	return ret;
}
1012
/*
 * Module exit: tear down in strict reverse order of init_ceph().
 */
static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_caps_finalize();
	destroy_caches();
	ceph_msgr_exit();
	ceph_debugfs_cleanup();
}
1022
/* module entry/exit points and metadata */
module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
new file mode 100644
index 000000000000..65d12036b670
--- /dev/null
+++ b/fs/ceph/super.h
@@ -0,0 +1,901 @@
1#ifndef _FS_CEPH_SUPER_H
2#define _FS_CEPH_SUPER_H
3
4#include "ceph_debug.h"
5
6#include <asm/unaligned.h>
7#include <linux/backing-dev.h>
8#include <linux/completion.h>
9#include <linux/exportfs.h>
10#include <linux/fs.h>
11#include <linux/mempool.h>
12#include <linux/pagemap.h>
13#include <linux/wait.h>
14#include <linux/writeback.h>
15
16#include "types.h"
17#include "messenger.h"
18#include "msgpool.h"
19#include "mon_client.h"
20#include "mds_client.h"
21#include "osd_client.h"
22#include "ceph_fs.h"
23
24/* f_type in struct statfs */
25#define CEPH_SUPER_MAGIC 0x00c36400
26
27/* large granularity for statfs utilization stats to facilitate
28 * large volume sizes on 32-bit machines. */
29#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
30#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
31
32/*
33 * mount options
34 */
35#define CEPH_OPT_FSID (1<<0)
36#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
37#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
38#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */
39#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
40#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */
41#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
42
43#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES)
44
/*
 * Set/test a CEPH_OPT_* flag bit on a client's mount args.
 * NOTE(review): ceph_set_opt expands with a trailing ';', so it
 * misbehaves inside unbraced if/else -- use only at statement level.
 */
#define ceph_set_opt(client, opt) \
	(client)->mount_args->flags |= CEPH_OPT_##opt;
#define ceph_test_opt(client, opt) \
	(!!((client)->mount_args->flags & CEPH_OPT_##opt))
49
50
/*
 * Parsed mount options; built by parse_mount_args() and owned by the
 * ceph_client (freed via destroy_mount_args()).
 */
struct ceph_mount_args {
	int sb_flags;
	int num_mon;		/* presumably the mon_addr entry count --
				 * verify against parse_mount_args() */
	struct ceph_entity_addr *mon_addr;
	int flags;		/* CEPH_OPT_* bits */
	int mount_timeout;	/* seconds (multiplied by HZ at use sites) */
	int osd_idle_ttl;
	int caps_wanted_delay_min, caps_wanted_delay_max;
	struct ceph_fsid fsid;
	struct ceph_entity_addr my_addr;
	int wsize;
	int rsize;          /* max readahead */
	int max_readdir;    /* max readdir size */
	int congestion_kb;  /* writeback congestion threshold, in KB --
			     * the old "max readdir size" comment here
			     * looks like a copy/paste slip */
	int osd_timeout;
	int osd_keepalive_timeout;
	char *snapdir_name;   /* default ".snap" */
	char *name;
	char *secret;
	int cap_release_safety;
};
72
73/*
74 * defaults
75 */
76#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
77#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
78#define CEPH_OSD_KEEPALIVE_DEFAULT 5
79#define CEPH_OSD_IDLE_TTL_DEFAULT 60
80#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
81
82#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
83#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
84
85#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
86#define CEPH_AUTH_NAME_DEFAULT "guest"
87
88/*
89 * Delay telling the MDS we no longer want caps, in case we reopen
90 * the file. Delay a minimum amount of time, even if we send a cap
91 * message for some other reason. Otherwise, take the oppotunity to
92 * update the mds to avoid sending another message later.
93 */
94#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
95#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
96
97
/* mount state (stored in ceph_client.mount_state) */
enum {
	CEPH_MOUNT_MOUNTING,
	CEPH_MOUNT_MOUNTED,
	CEPH_MOUNT_UNMOUNTING,
	CEPH_MOUNT_UNMOUNTED,
	CEPH_MOUNT_SHUTDOWN,
};
106
/*
 * subtract jiffies
 *
 * BUG()s if b is after a; callers must guarantee the ordering.
 */
static inline unsigned long time_sub(unsigned long a, unsigned long b)
{
	BUG_ON(time_after(b, a));
	return (long)a - (long)b;
}
115
/*
 * per-filesystem client state
 *
 * possibly shared by multiple mount points, if they are
 * mounting the same ceph filesystem/cluster.
 */
struct ceph_client {
	struct ceph_fsid fsid;
	bool have_fsid;		/* set once ceph_check_fsid() learns it */

	struct mutex mount_mutex;       /* serialize mount attempts */
	struct ceph_mount_args *mount_args;

	struct super_block *sb;

	unsigned long mount_state;	/* CEPH_MOUNT_* */
	wait_queue_head_t auth_wq;	/* waited on in ceph_mount() for
					 * mon map arrival or auth error */

	int auth_err;			/* <0 if authentication failed */

	int min_caps;                  /* min caps i added */

	struct ceph_messenger *msgr;   /* messenger instance; created
					* lazily in ceph_mount() */
	struct ceph_mon_client monc;
	struct ceph_mds_client mdsc;
	struct ceph_osd_client osdc;

	/* writeback */
	mempool_t *wb_pagevec_pool;
	struct workqueue_struct *wb_wq;		/* writeback */
	struct workqueue_struct *pg_inv_wq;	/* page invalidation */
	struct workqueue_struct *trunc_wq;	/* truncation */
	atomic_long_t writeback_count;

	struct backing_dev_info backing_dev_info;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_monmap;
	struct dentry *debugfs_mdsmap, *debugfs_osdmap;
	struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
	struct dentry *debugfs_congestion_kb;
	struct dentry *debugfs_bdi;
#endif
};
160
161static inline struct ceph_client *ceph_client(struct super_block *sb)
162{
163 return sb->s_fs_info;
164}
165
166
/*
 * File i/o capability.  This tracks shared state with the metadata
 * server that allows us to cache or writeback attributes or to read
 * and write data.  For any given inode, we should have one or more
 * capabilities, one issued by each metadata server, and our
 * cumulative access is the OR of all issued capabilities.
 *
 * Each cap is referenced by the inode's i_caps rbtree and by per-mds
 * session capability lists.
 */
struct ceph_cap {
	struct ceph_inode_info *ci;
	struct rb_node ci_node;          /* per-ci cap tree */
	struct ceph_mds_session *session;
	struct list_head session_caps;   /* per-session caplist */
	int mds;			 /* mds index this cap came from */
	u64 cap_id;       /* unique cap id (mds provided) */
	int issued;       /* latest, from the mds */
	int implemented;  /* implemented superset of issued (for revocation) */
	int mds_wanted;   /* caps the mds believes we want */
	u32 seq, issue_seq, mseq;
	u32 cap_gen;      /* active/stale cycle */
	unsigned long last_used;
	struct list_head caps_item;
};
192
193#define CHECK_CAPS_NODELAY 1 /* do not delay any further */
194#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */
195#define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */
196
/*
 * Snapped cap state that is pending flush to mds.  When a snapshot occurs,
 * we first complete any in-process sync writes and writeback any dirty
 * data before flushing the snapped state (tracked here) back to the MDS.
 */
struct ceph_cap_snap {
	atomic_t nref;			/* see ceph_put_cap_snap() */
	struct ceph_inode_info *ci;
	struct list_head ci_item, flushing_item;

	u64 follows, flush_tid;
	int issued, dirty;
	struct ceph_snap_context *context;

	/* inode metadata frozen at snapshot time: */
	mode_t mode;
	uid_t uid;
	gid_t gid;

	void *xattr_blob;
	int xattr_len;
	u64 xattr_version;

	u64 size;
	struct timespec mtime, atime, ctime;
	u64 time_warp_seq;
	int writing;   /* a sync write is still in progress */
	int dirty_pages;     /* dirty pages awaiting writeback */
};
225
/*
 * Drop a reference on a cap_snap, freeing it when the last ref goes.
 * NOTE(review): xattr_blob is not freed here -- confirm its ownership
 * elsewhere before relying on this for cleanup.
 */
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
	if (atomic_dec_and_test(&capsnap->nref))
		kfree(capsnap);
}
231
/*
 * The frag tree describes how a directory is fragmented, potentially across
 * multiple metadata servers.  It is also used to indicate points where
 * metadata authority is delegated, and whether/where metadata is replicated.
 *
 * A _leaf_ frag will be present in the i_fragtree IFF there is
 * delegation info.  That is, if mds >= 0 || ndist > 0.
 */
#define CEPH_MAX_DIRFRAG_REP 4

struct ceph_inode_frag {
	struct rb_node node;	/* linked into ceph_inode_info.i_fragtree */

	/* fragtree state */
	u32 frag;
	int split_by;          /* i.e. 2^(split_by) children */

	/* delegation and replication info */
	int mds;               /* -1 if same authority as parent */
	int ndist;             /* >0 if replicated */
	int dist[CEPH_MAX_DIRFRAG_REP];
};
254
/*
 * We cache inode xattrs as an encoded blob until they are first used,
 * at which point we parse them into an rbtree.
 *
 * One parsed xattr entry; name/val may point into the shared blob or
 * into separately allocated memory (see should_free_* flags).
 */
struct ceph_inode_xattr {
	struct rb_node node;

	const char *name;
	int name_len;
	const char *val;
	int val_len;
	int dirty;		/* needs to be written back to the mds */

	int should_free_name;	/* name was allocated, free on teardown */
	int should_free_val;	/* val was allocated, free on teardown */
};
271
/* per-inode xattr cache: raw blob plus (lazily built) parsed index */
struct ceph_inode_xattrs_info {
	/*
	 * (still encoded) xattr blob. we avoid the overhead of parsing
	 * this until someone actually calls getxattr, etc.
	 *
	 * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
	 * NULL means we don't know.
	 */
	struct ceph_buffer *blob, *prealloc_blob;

	struct rb_root index;		/* parsed ceph_inode_xattr nodes */
	bool dirty;			/* any entry needs writeback */
	int count;			/* number of parsed entries */
	int names_size;			/* total bytes of all names */
	int vals_size;			/* total bytes of all values */
	u64 version, index_version;
};
289
/*
 * Ceph inode.
 */
/* i_ceph_flags bits (NOTE(review): bit value 2 is unused here) */
#define CEPH_I_COMPLETE  1  /* we have complete directory cached */
#define CEPH_I_NODELAY   4  /* do not delay cap release */
#define CEPH_I_FLUSH     8  /* do not delay flush of dirty metadata */
#define CEPH_I_NOFLUSH  16  /* do not flush dirty caps */

struct ceph_inode_info {
	struct ceph_vino i_vino;   /* ceph ino + snap */

	u64 i_version;
	u32 i_time_warp_seq;

	unsigned i_ceph_flags;		/* CEPH_I_* above */
	unsigned long i_release_count;

	struct ceph_file_layout i_layout;
	char *i_symlink;

	/* for dirs */
	struct timespec i_rctime;
	u64 i_rbytes, i_rfiles, i_rsubdirs;	/* recursive stats */
	u64 i_files, i_subdirs;
	u64 i_max_offset;  /* largest readdir offset, set with I_COMPLETE */

	struct rb_root i_fragtree;	/* ceph_inode_frag nodes */
	struct mutex i_fragtree_mutex;

	struct ceph_inode_xattrs_info i_xattrs;

	/* capabilities.  protected _both_ by i_lock and cap->session's
	 * s_mutex. */
	struct rb_root i_caps;           /* cap list */
	struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
	unsigned i_dirty_caps, i_flushing_caps;  /* mask of dirtied fields */
	struct list_head i_dirty_item, i_flushing_item;
	u64 i_cap_flush_seq;
	/* we need to track cap writeback on a per-cap-bit basis, to allow
	 * overlapping, pipelined cap flushes to the mds.  we can probably
	 * reduce the tid to 8 bits if we're concerned about inode size. */
	u16 i_cap_flush_last_tid, i_cap_flush_tid[CEPH_CAP_BITS];
	wait_queue_head_t i_cap_wq;      /* threads waiting on a capability */
	unsigned long i_hold_caps_min; /* jiffies */
	unsigned long i_hold_caps_max; /* jiffies */
	struct list_head i_cap_delay_list;  /* for delayed cap release to mds */
	int i_cap_exporting_mds;         /* to handle cap migration between */
	unsigned i_cap_exporting_mseq;   /*  mds's. */
	unsigned i_cap_exporting_issued;
	struct ceph_cap_reservation i_cap_migration_resv;
	struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
	unsigned i_snap_caps;           /* cap bits for snapped files */

	int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */

	u32 i_truncate_seq;        /* last truncate to smaller size */
	u64 i_truncate_size;       /*  and the size we last truncated down to */
	int i_truncate_pending;    /*  still need to call vmtruncate */

	u64 i_max_size;            /* max file size authorized by mds */
	u64 i_reported_size; /* (max_)size reported to or requested of mds */
	u64 i_wanted_max_size;     /* offset we'd like to write too */
	u64 i_requested_max_size;  /* max_size we've requested */

	/* held references to caps */
	int i_pin_ref;
	int i_rd_ref, i_rdcache_ref, i_wr_ref;
	int i_wrbuffer_ref, i_wrbuffer_ref_head;
	u32 i_shared_gen;       /* increment each time we get FILE_SHARED */
	u32 i_rdcache_gen;      /* we increment this each time we get
				   FILE_CACHE.  If it's non-zero, we
				   _may_ have cached pages. */
	u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */

	struct list_head i_unsafe_writes; /* uncommitted sync writes */
	struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */
	spinlock_t i_unsafe_lock;

	struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
	int i_snap_realm_counter; /* snap realm (if caps) */
	struct list_head i_snap_realm_item;
	struct list_head i_snap_flush_item;

	struct work_struct i_wb_work;  /* writeback work */
	struct work_struct i_pg_inv_work;  /* page invalidation work */

	struct work_struct i_vmtruncate_work;

	struct inode vfs_inode; /* at end */
};
381
/* map a VFS inode to its containing ceph_inode_info */
static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
{
	return container_of(inode, struct ceph_inode_info, vfs_inode);
}
386
/* clear @mask bits in i_ceph_flags under i_lock */
static inline void ceph_i_clear(struct inode *inode, unsigned mask)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&inode->i_lock);
	ci->i_ceph_flags &= ~mask;
	spin_unlock(&inode->i_lock);
}
395
/* set @mask bits in i_ceph_flags under i_lock */
static inline void ceph_i_set(struct inode *inode, unsigned mask)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&inode->i_lock);
	ci->i_ceph_flags |= mask;
	spin_unlock(&inode->i_lock);
}
404
/*
 * Test whether all bits in @mask are set in i_ceph_flags.
 *
 * Lockless (unlike ceph_i_set/clear): the smp_mb() orders this read
 * against earlier accesses, but the result is only a snapshot and may
 * be stale by the time the caller acts on it.
 */
static inline bool ceph_i_test(struct inode *inode, unsigned mask)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool r;

	smp_mb();
	r = (ci->i_ceph_flags & mask) == mask;
	return r;
}
414
415
416/* find a specific frag @f */
417extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
418 u32 f);
419
420/*
421 * choose fragment for value @v. copy frag content to pfrag, if leaf
422 * exists
423 */
424extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
425 struct ceph_inode_frag *pfrag,
426 int *found);
427
/*
 * Ceph dentry state
 *
 * Hangs off dentry->d_fsdata; tracks the mds lease on the dentry and
 * its position on the dentry LRU.
 */
struct ceph_dentry_info {
	struct ceph_mds_session *lease_session;
	u32 lease_gen, lease_shared_gen;
	u32 lease_seq;
	unsigned long lease_renew_after, lease_renew_from;
	struct list_head lru;
	struct dentry *dentry;	/* back-pointer to the owning dentry */
	u64 time;
	u64 offset;
};
441
/* fetch the ceph state attached to a dentry via d_fsdata */
static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
{
	return (struct ceph_dentry_info *)dentry->d_fsdata;
}
446
/* encode a readdir position as (frag << 32) | offset in an loff_t */
static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
{
	return ((loff_t)frag << 32) | (loff_t)off;
}
451
/*
 * ino_t is <64 bits on many architectures, blech.
 *
 * don't include snap in ino hash, at least for now.
 */
static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
{
	ino_t ino = (ino_t)vino.ino;  /* ^ (vino.snap << 20); */
#if BITS_PER_LONG == 32
	/* fold the truncated high bits back in, and never return 0 */
	ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
	if (!ino)
		ino = 1;
#endif
	return ino;
}
467
/* iget5 'set' callback: stash the ceph vino and derive i_ino from it */
static inline int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}
474
/* full 64-bit ino + snap identifier for an inode */
static inline struct ceph_vino ceph_vino(struct inode *inode)
{
	return ceph_inode(inode)->i_vino;
}
479
480/* for printf-style formatting */
481#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
482
/* 64-bit ceph ino of an inode */
static inline u64 ceph_ino(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.ino;
}
/* snap id of an inode */
static inline u64 ceph_snap(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.snap;
}
491
492static inline int ceph_ino_compare(struct inode *inode, void *data)
493{
494 struct ceph_vino *pvino = (struct ceph_vino *)data;
495 struct ceph_inode_info *ci = ceph_inode(inode);
496 return ci->i_vino.ino == pvino->ino &&
497 ci->i_vino.snap == pvino->snap;
498}
499
/*
 * Look up an inode by vino without instantiating it; comparison
 * checks both ino and snap (the hash key alone is lossy).
 */
static inline struct inode *ceph_find_inode(struct super_block *sb,
					    struct ceph_vino vino)
{
	ino_t t = ceph_vino_to_ino(vino);
	return ilookup5(sb, t, ceph_ino_compare, &vino);
}
506
507
/*
 * caps helpers
 */
/* true if the inode holds at least one mds-issued cap */
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}
515
516extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
517extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
518extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
519 struct ceph_cap *cap);
520
/* locked wrapper around __ceph_caps_issued() */
static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
	int issued;
	spin_lock(&ci->vfs_inode.i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	spin_unlock(&ci->vfs_inode.i_lock);
	return issued;
}
529
/* locked wrapper around __ceph_caps_issued_mask() */
static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
					int touch)
{
	int r;
	spin_lock(&ci->vfs_inode.i_lock);
	r = __ceph_caps_issued_mask(ci, mask, touch);
	spin_unlock(&ci->vfs_inode.i_lock);
	return r;
}
539
/* mask of caps that are dirty or being flushed; the __ prefix follows
 * the local convention that the caller is expected to hold i_lock */
static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
	return ci->i_dirty_caps | ci->i_flushing_caps;
}
544extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
545
546extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
547extern int __ceph_caps_used(struct ceph_inode_info *ci);
548
549extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
550
/*
 * wanted, by virtue of open file modes AND cap refs (buffered/cached data)
 */
static inline int __ceph_caps_wanted(struct ceph_inode_info *ci)
{
	int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
	if (w & CEPH_CAP_FILE_BUFFER)
		w |= CEPH_CAP_FILE_EXCL;  /* we want EXCL if dirty data */
	return w;
}
561
562/* what the mds thinks we want */
563extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci);
564
565extern void ceph_caps_init(void);
566extern void ceph_caps_finalize(void);
567extern void ceph_adjust_min_caps(int delta);
568extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
569extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
570extern void ceph_reservation_status(struct ceph_client *client,
571 int *total, int *avail, int *used,
572 int *reserved, int *min);
573
574static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
575{
576 return (struct ceph_client *)inode->i_sb->s_fs_info;
577}
578
/* client backing a superblock (stored in s_fs_info by ceph_set_super()) */
static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
{
	return (struct ceph_client *)sb->s_fs_info;
}
583
584
/*
 * we keep buffered readdir results attached to file->private_data
 */
struct ceph_file_info {
	int fmode;     /* initialized on open */

	/* readdir: position within the dir */
	u32 frag;			/* current dir fragment */
	struct ceph_mds_request *last_readdir;	/* cached readdir reply */
	int at_end;			/* no more entries to return */

	/* readdir: position within a frag */
	unsigned offset;       /* offset of last chunk, adjusted for . and .. */
	u64 next_offset;       /* offset of next chunk (last_name's + 1) */
	char *last_name;       /* last entry in previous chunk */
	struct dentry *dentry; /* next dentry (for dcache readdir) */
	unsigned long dir_release_count;

	/* buffer for read() on a directory with the dirstat mount option */
	char *dir_info;
	int dir_info_len;
};
607
608
609
610/*
611 * snapshots
612 */
613
/*
 * A "snap context" is the set of existing snapshots when we
 * write data.  It is used by the OSD to guide its COW behavior.
 *
 * The ceph_snap_context is refcounted, and attached to each dirty
 * page, indicating which context the dirty data belonged when it was
 * dirtied.
 */
struct ceph_snap_context {
	atomic_t nref;		/* see ceph_get/put_snap_context() */
	u64 seq;
	int num_snaps;
	u64 snaps[];		/* flexible array of snap ids */
};
628
629static inline struct ceph_snap_context *
630ceph_get_snap_context(struct ceph_snap_context *sc)
631{
632 /*
633 printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
634 atomic_read(&sc->nref)+1);
635 */
636 if (sc)
637 atomic_inc(&sc->nref);
638 return sc;
639}
640
641static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
642{
643 if (!sc)
644 return;
645 /*
646 printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
647 atomic_read(&sc->nref)-1);
648 */
649 if (atomic_dec_and_test(&sc->nref)) {
650 /*printk(" deleting snap_context %p\n", sc);*/
651 kfree(sc);
652 }
653}
654
/*
 * A "snap realm" describes a subset of the file hierarchy sharing
 * the same set of snapshots that apply to it.  The realms themselves
 * are organized into a hierarchy, such that children inherit (some of)
 * the snapshots of their parents.
 *
 * All inodes within the realm that have capabilities are linked into a
 * per-realm list.
 */
struct ceph_snap_realm {
	u64 ino;
	atomic_t nref;		/* see ceph_get/put_snap_realm() */
	struct rb_node node;

	u64 created, seq;
	u64 parent_ino;
	u64 parent_since;   /* snapid when our current parent became so */

	u64 *prior_parent_snaps;      /* snaps inherited from any parents we */
	int num_prior_parent_snaps;   /*  had prior to parent_since */
	u64 *snaps;                   /* snaps specific to this realm */
	int num_snaps;

	struct ceph_snap_realm *parent;
	struct list_head children;       /* list of child realms */
	struct list_head child_item;

	struct list_head empty_item;     /* if i have ref==0 */

	/* the current set of snaps for this realm */
	struct ceph_snap_context *cached_context;

	struct list_head inodes_with_caps;
	spinlock_t inodes_with_caps_lock;	/* protects the list above */
};
690
691
692
/*
 * calculate the number of pages a given length and offset map onto,
 * if we align the data.  e.g. a one-byte span straddling a page
 * boundary counts as two pages.
 */
static inline int calc_pages_for(u64 off, u64 len)
{
	return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
		(off >> PAGE_CACHE_SHIFT);
}
702
703
704
705/* snap.c */
706struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
707 u64 ino);
708extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
709 struct ceph_snap_realm *realm);
710extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
711 struct ceph_snap_realm *realm);
712extern int ceph_update_snap_trace(struct ceph_mds_client *m,
713 void *p, void *e, bool deletion);
714extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
715 struct ceph_mds_session *session,
716 struct ceph_msg *msg);
717extern void ceph_queue_cap_snap(struct ceph_inode_info *ci,
718 struct ceph_snap_context *snapc);
719extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
720 struct ceph_cap_snap *capsnap);
721extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
722
/*
 * a cap_snap is "pending" if it is still awaiting an in-progress
 * sync write (that may/may not still update size, mtime, etc.).
 *
 * Only the most recent (tail) cap_snap can still be writing.
 */
static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
{
	return !list_empty(&ci->i_cap_snaps) &&
		list_entry(ci->i_cap_snaps.prev, struct ceph_cap_snap,
			   ci_item)->writing;
}
733
734
735/* super.c */
736extern struct kmem_cache *ceph_inode_cachep;
737extern struct kmem_cache *ceph_cap_cachep;
738extern struct kmem_cache *ceph_dentry_cachep;
739extern struct kmem_cache *ceph_file_cachep;
740
741extern const char *ceph_msg_type_name(int type);
742extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
743
744#define FSID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \
745 "%02x%02x%02x%02x%02x%02x"
746#define PR_FSID(f) (f)->fsid[0], (f)->fsid[1], (f)->fsid[2], (f)->fsid[3], \
747 (f)->fsid[4], (f)->fsid[5], (f)->fsid[6], (f)->fsid[7], \
748 (f)->fsid[8], (f)->fsid[9], (f)->fsid[10], (f)->fsid[11], \
749 (f)->fsid[12], (f)->fsid[13], (f)->fsid[14], (f)->fsid[15]
750
751/* inode.c */
752extern const struct inode_operations ceph_file_iops;
753
754extern struct inode *ceph_alloc_inode(struct super_block *sb);
755extern void ceph_destroy_inode(struct inode *inode);
756
757extern struct inode *ceph_get_inode(struct super_block *sb,
758 struct ceph_vino vino);
759extern struct inode *ceph_get_snapdir(struct inode *parent);
760extern int ceph_fill_file_size(struct inode *inode, int issued,
761 u32 truncate_seq, u64 truncate_size, u64 size);
762extern void ceph_fill_file_time(struct inode *inode, int issued,
763 u64 time_warp_seq, struct timespec *ctime,
764 struct timespec *mtime, struct timespec *atime);
765extern int ceph_fill_trace(struct super_block *sb,
766 struct ceph_mds_request *req,
767 struct ceph_mds_session *session);
768extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
769 struct ceph_mds_session *session);
770
771extern int ceph_inode_holds_cap(struct inode *inode, int mask);
772
773extern int ceph_inode_set_size(struct inode *inode, loff_t size);
774extern void __ceph_do_pending_vmtruncate(struct inode *inode);
775extern void ceph_queue_vmtruncate(struct inode *inode);
776
777extern void ceph_queue_invalidate(struct inode *inode);
778extern void ceph_queue_writeback(struct inode *inode);
779
780extern int ceph_do_getattr(struct inode *inode, int mask);
781extern int ceph_permission(struct inode *inode, int mask);
782extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
783extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
784 struct kstat *stat);
785
786/* xattr.c */
787extern int ceph_setxattr(struct dentry *, const char *, const void *,
788 size_t, int);
789extern ssize_t ceph_getxattr(struct dentry *, const char *, void *, size_t);
790extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
791extern int ceph_removexattr(struct dentry *, const char *);
792extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
793extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
794
795/* caps.c */
796extern const char *ceph_cap_string(int c);
797extern void ceph_handle_caps(struct ceph_mds_session *session,
798 struct ceph_msg *msg);
799extern int ceph_add_cap(struct inode *inode,
800 struct ceph_mds_session *session, u64 cap_id,
801 int fmode, unsigned issued, unsigned wanted,
802 unsigned cap, unsigned seq, u64 realmino, int flags,
803 struct ceph_cap_reservation *caps_reservation);
804extern void __ceph_remove_cap(struct ceph_cap *cap);
/* Lock the cap's inode, drop the cap via __ceph_remove_cap, unlock. */
static inline void ceph_remove_cap(struct ceph_cap *cap)
{
	struct inode *inode = &cap->ci->vfs_inode;
	spin_lock(&inode->i_lock);
	__ceph_remove_cap(cap);
	spin_unlock(&inode->i_lock);
}
812extern void ceph_put_cap(struct ceph_cap *cap);
813
814extern void ceph_queue_caps_release(struct inode *inode);
815extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
816extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync);
817extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
818 struct ceph_mds_session *session);
819extern int ceph_get_cap_mds(struct inode *inode);
820extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
821extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
822extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
823 struct ceph_snap_context *snapc);
824extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
825 struct ceph_mds_session **psession);
826extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
827 struct ceph_mds_session *session);
828extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
829extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
830
831extern int ceph_encode_inode_release(void **p, struct inode *inode,
832 int mds, int drop, int unless, int force);
833extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
834 int mds, int drop, int unless);
835
836extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
837 int *got, loff_t endoff);
838
/* for counting open files by mode */
static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
{
	/* NOTE(review): no bounds check on 'mode'; callers presumably pass
	 * a valid CEPH_FILE_MODE_* index — confirm. */
	ci->i_nr_by_mode[mode]++;
}
844extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
845
846/* addr.c */
847extern const struct address_space_operations ceph_aops;
848extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
849
850/* file.c */
851extern const struct file_operations ceph_file_fops;
852extern const struct address_space_operations ceph_aops;
853extern int ceph_open(struct inode *inode, struct file *file);
854extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
855 struct nameidata *nd, int mode,
856 int locked_dir);
857extern int ceph_release(struct inode *inode, struct file *filp);
858extern void ceph_release_page_vector(struct page **pages, int num_pages);
859
860/* dir.c */
861extern const struct file_operations ceph_dir_fops;
862extern const struct inode_operations ceph_dir_iops;
863extern struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
864 ceph_snapdir_dentry_ops;
865
866extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
867extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
868 struct dentry *dentry, int err);
869
870extern void ceph_dentry_lru_add(struct dentry *dn);
871extern void ceph_dentry_lru_touch(struct dentry *dn);
872extern void ceph_dentry_lru_del(struct dentry *dn);
873
874/*
875 * our d_ops vary depending on whether the inode is live,
876 * snapshotted (read-only), or a virtual ".snap" directory.
877 */
878int ceph_init_dentry(struct dentry *dentry);
879
880
881/* ioctl.c */
882extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
883
884/* export.c */
885extern const struct export_operations ceph_export_ops;
886
887/* debugfs.c */
888extern int ceph_debugfs_init(void);
889extern void ceph_debugfs_cleanup(void);
890extern int ceph_debugfs_client_init(struct ceph_client *client);
891extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
892
893static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
894{
895 if (dentry && dentry->d_parent)
896 return dentry->d_parent->d_inode;
897
898 return NULL;
899}
900
901#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/types.h b/fs/ceph/types.h
new file mode 100644
index 000000000000..28b35a005ec2
--- /dev/null
+++ b/fs/ceph/types.h
@@ -0,0 +1,29 @@
1#ifndef _FS_CEPH_TYPES_H
2#define _FS_CEPH_TYPES_H
3
4/* needed before including ceph_fs.h */
5#include <linux/in.h>
6#include <linux/types.h>
7#include <linux/fcntl.h>
8#include <linux/string.h>
9
10#include "ceph_fs.h"
11#include "ceph_frag.h"
12#include "ceph_hash.h"
13
/*
 * Identify inodes by both their ino AND snapshot id (a u64).
 */
struct ceph_vino {
	u64 ino;	/* inode number */
	u64 snap;	/* snapshot id */
};
21
22
/* context for the caps reservation mechanism */
struct ceph_cap_reservation {
	int count;	/* number of caps reserved */
};
27
28
29#endif
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
new file mode 100644
index 000000000000..37d6ce645691
--- /dev/null
+++ b/fs/ceph/xattr.c
@@ -0,0 +1,844 @@
1#include "ceph_debug.h"
2#include "super.h"
3#include "decode.h"
4
5#include <linux/xattr.h>
6
7static bool ceph_is_valid_xattr(const char *name)
8{
9 return !strncmp(name, XATTR_SECURITY_PREFIX,
10 XATTR_SECURITY_PREFIX_LEN) ||
11 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
12 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
13}
14
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
	bool readonly;		/* true: setxattr/removexattr return -EOPNOTSUPP */
	char *name;		/* full xattr name, e.g. "user.ceph.dir.files";
				 * NULL terminates a vxattr table */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);	/* format value into val,
						 * return (needed) length */
};
25
26/* directories */
27
/* "user.ceph.dir.entries": direct children (files + subdirectories) */
static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}
33
/* "user.ceph.dir.files": direct child file count */
static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
				  size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}
39
/* "user.ceph.dir.subdirs": direct child subdirectory count */
static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}
45
/* "user.ceph.dir.rentries": recursive entry count (files + subdirs) */
static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}
51
/* "user.ceph.dir.rfiles": recursive file count */
static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}
57
/* "user.ceph.dir.rsubdirs": recursive subdirectory count */
static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}
63
/* "user.ceph.dir.rbytes": recursive byte total */
static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}
69
/* "user.ceph.dir.rctime": recursive ctime, formatted "sec.nsec" */
static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}
76
/* virtual xattrs for directories; the table ends at the NULL name */
static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
	{ true, "user.ceph.dir.entries", ceph_vxattrcb_entries},
	{ true, "user.ceph.dir.files", ceph_vxattrcb_files},
	{ true, "user.ceph.dir.subdirs", ceph_vxattrcb_subdirs},
	{ true, "user.ceph.dir.rentries", ceph_vxattrcb_rentries},
	{ true, "user.ceph.dir.rfiles", ceph_vxattrcb_rfiles},
	{ true, "user.ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
	{ true, "user.ceph.dir.rbytes", ceph_vxattrcb_rbytes},
	{ true, "user.ceph.dir.rctime", ceph_vxattrcb_rctime},
	{ true, NULL, NULL }
};
88
89/* files */
90
91static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
92 size_t size)
93{
94 int ret;
95
96 ret = snprintf(val, size,
97 "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
98 (unsigned long long)ceph_file_layout_su(ci->i_layout),
99 (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
100 (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
101 if (ceph_file_layout_pg_preferred(ci->i_layout))
102 ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
103 (unsigned long long)ceph_file_layout_pg_preferred(
104 ci->i_layout));
105 return ret;
106}
107
108static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
109 { true, "user.ceph.layout", ceph_vxattrcb_layout},
110 { NULL, NULL }
111};
112
113static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
114{
115 if (S_ISDIR(inode->i_mode))
116 return ceph_dir_vxattrs;
117 else if (S_ISREG(inode->i_mode))
118 return ceph_file_vxattrs;
119 return NULL;
120}
121
122static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
123 const char *name)
124{
125 do {
126 if (strcmp(vxattr->name, name) == 0)
127 return vxattr;
128 vxattr++;
129 } while (vxattr->name);
130 return NULL;
131}
132
133static int __set_xattr(struct ceph_inode_info *ci,
134 const char *name, int name_len,
135 const char *val, int val_len,
136 int dirty,
137 int should_free_name, int should_free_val,
138 struct ceph_inode_xattr **newxattr)
139{
140 struct rb_node **p;
141 struct rb_node *parent = NULL;
142 struct ceph_inode_xattr *xattr = NULL;
143 int c;
144 int new = 0;
145
146 p = &ci->i_xattrs.index.rb_node;
147 while (*p) {
148 parent = *p;
149 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
150 c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
151 if (c < 0)
152 p = &(*p)->rb_left;
153 else if (c > 0)
154 p = &(*p)->rb_right;
155 else {
156 if (name_len == xattr->name_len)
157 break;
158 else if (name_len < xattr->name_len)
159 p = &(*p)->rb_left;
160 else
161 p = &(*p)->rb_right;
162 }
163 xattr = NULL;
164 }
165
166 if (!xattr) {
167 new = 1;
168 xattr = *newxattr;
169 xattr->name = name;
170 xattr->name_len = name_len;
171 xattr->should_free_name = should_free_name;
172
173 ci->i_xattrs.count++;
174 dout("__set_xattr count=%d\n", ci->i_xattrs.count);
175 } else {
176 kfree(*newxattr);
177 *newxattr = NULL;
178 if (xattr->should_free_val)
179 kfree((void *)xattr->val);
180
181 if (should_free_name) {
182 kfree((void *)name);
183 name = xattr->name;
184 }
185 ci->i_xattrs.names_size -= xattr->name_len;
186 ci->i_xattrs.vals_size -= xattr->val_len;
187 }
188 if (!xattr) {
189 pr_err("__set_xattr ENOMEM on %p %llx.%llx xattr %s=%s\n",
190 &ci->vfs_inode, ceph_vinop(&ci->vfs_inode), name,
191 xattr->val);
192 return -ENOMEM;
193 }
194 ci->i_xattrs.names_size += name_len;
195 ci->i_xattrs.vals_size += val_len;
196 if (val)
197 xattr->val = val;
198 else
199 xattr->val = "";
200
201 xattr->val_len = val_len;
202 xattr->dirty = dirty;
203 xattr->should_free_val = (val && should_free_val);
204
205 if (new) {
206 rb_link_node(&xattr->node, parent, p);
207 rb_insert_color(&xattr->node, &ci->i_xattrs.index);
208 dout("__set_xattr_val p=%p\n", p);
209 }
210
211 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
212 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
213
214 return 0;
215}
216
217static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
218 const char *name)
219{
220 struct rb_node **p;
221 struct rb_node *parent = NULL;
222 struct ceph_inode_xattr *xattr = NULL;
223 int c;
224
225 p = &ci->i_xattrs.index.rb_node;
226 while (*p) {
227 parent = *p;
228 xattr = rb_entry(parent, struct ceph_inode_xattr, node);
229 c = strncmp(name, xattr->name, xattr->name_len);
230 if (c < 0)
231 p = &(*p)->rb_left;
232 else if (c > 0)
233 p = &(*p)->rb_right;
234 else {
235 dout("__get_xattr %s: found %.*s\n", name,
236 xattr->val_len, xattr->val);
237 return xattr;
238 }
239 }
240
241 dout("__get_xattr %s: not found\n", name);
242
243 return NULL;
244}
245
/* Free an xattr node plus whatever name/value buffers it owns. */
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}
257
/*
 * Unlink @xattr from ci's index, release any owned name/value buffers,
 * fix up the accounted sizes/count, and free the node.  Returns
 * -EOPNOTSUPP when @xattr is NULL (the name was not present).
 */
static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -EOPNOTSUPP;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}
278
/*
 * Look up @name in ci's xattr index and remove it.
 * Returns 0 on success, -EOPNOTSUPP if the name is not present.
 * (Dropped an unused rb_node** local whose initializer had no effect.)
 */
static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct ceph_inode_xattr *xattr;

	xattr = __get_xattr(ci, name);
	return __remove_xattr(ci, xattr);
}
291
292static char *__copy_xattr_names(struct ceph_inode_info *ci,
293 char *dest)
294{
295 struct rb_node *p;
296 struct ceph_inode_xattr *xattr = NULL;
297
298 p = rb_first(&ci->i_xattrs.index);
299 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
300
301 while (p) {
302 xattr = rb_entry(p, struct ceph_inode_xattr, node);
303 memcpy(dest, xattr->name, xattr->name_len);
304 dest[xattr->name_len] = '\0';
305
306 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
307 xattr->name_len, ci->i_xattrs.names_size);
308
309 dest += xattr->name_len + 1;
310 p = rb_next(p);
311 }
312
313 return dest;
314}
315
/*
 * Tear down ci's entire in-memory xattr index: erase and free every
 * rb-tree node, then reset the accounted sizes, count, and the index
 * version (so a later __build_xattrs rebuilds from the blob).
 */
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		/* advance before erasing; rb_next(tmp) is invalid after */
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
342
343static int __build_xattrs(struct inode *inode)
344{
345 u32 namelen;
346 u32 numattr = 0;
347 void *p, *end;
348 u32 len;
349 const char *name, *val;
350 struct ceph_inode_info *ci = ceph_inode(inode);
351 int xattr_version;
352 struct ceph_inode_xattr **xattrs = NULL;
353 int err = 0;
354 int i;
355
356 dout("__build_xattrs() len=%d\n",
357 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
358
359 if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
360 return 0; /* already built */
361
362 __ceph_destroy_xattrs(ci);
363
364start:
365 /* updated internal xattr rb tree */
366 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
367 p = ci->i_xattrs.blob->vec.iov_base;
368 end = p + ci->i_xattrs.blob->vec.iov_len;
369 ceph_decode_32_safe(&p, end, numattr, bad);
370 xattr_version = ci->i_xattrs.version;
371 spin_unlock(&inode->i_lock);
372
373 xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
374 GFP_NOFS);
375 err = -ENOMEM;
376 if (!xattrs)
377 goto bad_lock;
378 memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
379 for (i = 0; i < numattr; i++) {
380 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
381 GFP_NOFS);
382 if (!xattrs[i])
383 goto bad_lock;
384 }
385
386 spin_lock(&inode->i_lock);
387 if (ci->i_xattrs.version != xattr_version) {
388 /* lost a race, retry */
389 for (i = 0; i < numattr; i++)
390 kfree(xattrs[i]);
391 kfree(xattrs);
392 goto start;
393 }
394 err = -EIO;
395 while (numattr--) {
396 ceph_decode_32_safe(&p, end, len, bad);
397 namelen = len;
398 name = p;
399 p += len;
400 ceph_decode_32_safe(&p, end, len, bad);
401 val = p;
402 p += len;
403
404 err = __set_xattr(ci, name, namelen, val, len,
405 0, 0, 0, &xattrs[numattr]);
406
407 if (err < 0)
408 goto bad;
409 }
410 kfree(xattrs);
411 }
412 ci->i_xattrs.index_version = ci->i_xattrs.version;
413 ci->i_xattrs.dirty = false;
414
415 return err;
416bad_lock:
417 spin_lock(&inode->i_lock);
418bad:
419 if (xattrs) {
420 for (i = 0; i < numattr; i++)
421 kfree(xattrs[i]);
422 kfree(xattrs);
423 }
424 ci->i_xattrs.names_size = 0;
425 return err;
426}
427
428static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
429 int val_size)
430{
431 /*
432 * 4 bytes for the length, and additional 4 bytes per each xattr name,
433 * 4 bytes per each value
434 */
435 int size = 4 + ci->i_xattrs.count*(4 + 4) +
436 ci->i_xattrs.names_size +
437 ci->i_xattrs.vals_size;
438 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
439 ci->i_xattrs.count, ci->i_xattrs.names_size,
440 ci->i_xattrs.vals_size);
441
442 if (name_size)
443 size += 4 + 4 + name_size + val_size;
444
445 return size;
446}
447
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 *
 * Encoding: a 32-bit xattr count, then for each xattr a 32-bit name
 * length + name bytes and a 32-bit value length + value bytes (the
 * layout __get_required_blob_size sizes for).  The prealloc_blob
 * becomes the new blob; the previous blob reference is dropped.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		/* the caller must have preallocated enough room */
		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
	}
}
492
/*
 * Get one xattr value.  If we hold the XATTR_SHARED cap and the cached
 * rb-tree index has caught up with the inode's xattr version, answer
 * locally; otherwise fetch xattrs from the MDS first.  Virtual xattrs
 * (computed from inode fields) are consulted when no real xattr of the
 * requested name exists.
 */
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr_cb *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	if (vxattrs)
		vxattr = ceph_match_vxattr(vxattrs, name);

	spin_lock(&inode->i_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		/* cached index is current; answer locally */
		goto get_xattr;
	} else {
		spin_unlock(&inode->i_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&inode->i_lock);

	/* NOTE(review): this readonly-vxattr shortcut only runs on the
	 * slow path; when caps are current we jump straight to get_xattr
	 * and a vxattr is only served via the !xattr fallback below —
	 * confirm that asymmetry is intended. */
	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	/* size == 0 means the caller is probing for the value's length */
	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&inode->i_lock);
	return err;
}
559
560ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
561{
562 struct inode *inode = dentry->d_inode;
563 struct ceph_inode_info *ci = ceph_inode(inode);
564 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
565 u32 vir_namelen = 0;
566 u32 namelen;
567 int err;
568 u32 len;
569 int i;
570
571 spin_lock(&inode->i_lock);
572 dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
573 ci->i_xattrs.version, ci->i_xattrs.index_version);
574
575 if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
576 (ci->i_xattrs.index_version > ci->i_xattrs.version)) {
577 goto list_xattr;
578 } else {
579 spin_unlock(&inode->i_lock);
580 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
581 if (err)
582 return err;
583 }
584
585 spin_lock(&inode->i_lock);
586
587 err = __build_xattrs(inode);
588 if (err < 0)
589 goto out;
590
591list_xattr:
592 vir_namelen = 0;
593 /* include virtual dir xattrs */
594 if (vxattrs)
595 for (i = 0; vxattrs[i].name; i++)
596 vir_namelen += strlen(vxattrs[i].name) + 1;
597 /* adding 1 byte per each variable due to the null termination */
598 namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
599 err = -ERANGE;
600 if (size && namelen > size)
601 goto out;
602
603 err = namelen;
604 if (size == 0)
605 goto out;
606
607 names = __copy_xattr_names(ci, names);
608
609 /* virtual xattr names, too */
610 if (vxattrs)
611 for (i = 0; vxattrs[i].name; i++) {
612 len = sprintf(names, "%s", vxattrs[i].name);
613 names += len + 1;
614 }
615
616out:
617 spin_unlock(&inode->i_lock);
618 return err;
619}
620
621static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
622 const char *value, size_t size, int flags)
623{
624 struct ceph_client *client = ceph_client(dentry->d_sb);
625 struct inode *inode = dentry->d_inode;
626 struct ceph_inode_info *ci = ceph_inode(inode);
627 struct inode *parent_inode = dentry->d_parent->d_inode;
628 struct ceph_mds_request *req;
629 struct ceph_mds_client *mdsc = &client->mdsc;
630 int err;
631 int i, nr_pages;
632 struct page **pages = NULL;
633 void *kaddr;
634
635 /* copy value into some pages */
636 nr_pages = calc_pages_for(0, size);
637 if (nr_pages) {
638 pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
639 if (!pages)
640 return -ENOMEM;
641 err = -ENOMEM;
642 for (i = 0; i < nr_pages; i++) {
643 pages[i] = alloc_page(GFP_NOFS);
644 if (!pages[i]) {
645 nr_pages = i;
646 goto out;
647 }
648 kaddr = kmap(pages[i]);
649 memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
650 min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
651 }
652 }
653
654 dout("setxattr value=%.*s\n", (int)size, value);
655
656 /* do request */
657 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
658 USE_AUTH_MDS);
659 if (IS_ERR(req)) {
660 err = PTR_ERR(req);
661 goto out;
662 }
663 req->r_inode = igrab(inode);
664 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
665 req->r_num_caps = 1;
666 req->r_args.setxattr.flags = cpu_to_le32(flags);
667 req->r_path2 = kstrdup(name, GFP_NOFS);
668
669 req->r_pages = pages;
670 req->r_num_pages = nr_pages;
671 req->r_data_len = size;
672
673 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
674 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
675 ceph_mdsc_put_request(req);
676 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
677
678out:
679 if (pages) {
680 for (i = 0; i < nr_pages; i++)
681 __free_page(pages[i]);
682 kfree(pages);
683 }
684 return err;
685}
686
/*
 * Set an xattr.  When we hold the XATTR_EXCL cap the update is applied
 * locally (marking the inode's caps dirty for later writeback);
 * otherwise we fall back to a synchronous MDS request.
 *
 * Copies of the name/value and the tree node are preallocated before
 * i_lock is taken.  On the local-update path their ownership passes to
 * __set_xattr; on the sync and error paths they are freed at 'out'
 * (kfree(NULL) is a no-op for any that were never allocated).
 */
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int issued;
	int required_blob_size;

	/* snapshots are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	/* refuse to overwrite a read-only virtual xattr */
	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmalloc(name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;
	memcpy(newname, name, name_len + 1);

	if (val_len) {
		newval = kmalloc(val_len + 1, GFP_NOFS);
		if (!newval)
			goto out;
		memcpy(newval, value, val_len);
		newval[val_len] = '\0';
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&inode->i_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	/* grow the prealloc blob — dropping i_lock to allocate, then
	 * rechecking caps and size via 'retry' — until it can hold the
	 * encoded xattr set including this new entry */
	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob = NULL;

		spin_unlock(&inode->i_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&inode->i_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);
	__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&inode->i_lock);

	return err;

do_sync:
	spin_unlock(&inode->i_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
778
/*
 * Ask the MDS to remove an xattr (synchronous request); used when we
 * lack the XATTR_EXCL cap for a local update.
 */
static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_client *client = ceph_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode = dentry->d_parent->d_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	/* NOTE(review): kstrdup result unchecked — confirm the request
	 * layer tolerates a NULL r_path2 on ENOMEM */
	req->r_path2 = kstrdup(name, GFP_NOFS);

	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	ceph_mdsc_put_request(req);
	return err;
}
801
802int ceph_removexattr(struct dentry *dentry, const char *name)
803{
804 struct inode *inode = dentry->d_inode;
805 struct ceph_inode_info *ci = ceph_inode(inode);
806 struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
807 int issued;
808 int err;
809
810 if (ceph_snap(inode) != CEPH_NOSNAP)
811 return -EROFS;
812
813 if (!ceph_is_valid_xattr(name))
814 return -EOPNOTSUPP;
815
816 if (vxattrs) {
817 struct ceph_vxattr_cb *vxattr =
818 ceph_match_vxattr(vxattrs, name);
819 if (vxattr && vxattr->readonly)
820 return -EOPNOTSUPP;
821 }
822
823 spin_lock(&inode->i_lock);
824 __build_xattrs(inode);
825 issued = __ceph_caps_issued(ci, NULL);
826 dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
827
828 if (!(issued & CEPH_CAP_XATTR_EXCL))
829 goto do_sync;
830
831 err = __remove_xattr_by_name(ceph_inode(inode), name);
832 __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
833 ci->i_xattrs.dirty = true;
834 inode->i_ctime = CURRENT_TIME;
835
836 spin_unlock(&inode->i_lock);
837
838 return err;
839do_sync:
840 spin_unlock(&inode->i_lock);
841 err = ceph_send_removexattr(dentry, name);
842 return err;
843}
844
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c6a03627176..5183bc2a1916 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -312,6 +312,7 @@ cifs_alloc_inode(struct super_block *sb)
312 cifs_inode->clientCanCacheRead = false; 312 cifs_inode->clientCanCacheRead = false;
313 cifs_inode->clientCanCacheAll = false; 313 cifs_inode->clientCanCacheAll = false;
314 cifs_inode->delete_pending = false; 314 cifs_inode->delete_pending = false;
315 cifs_inode->invalid_mapping = false;
315 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ 316 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
316 cifs_inode->server_eof = 0; 317 cifs_inode->server_eof = 0;
317 318
@@ -638,7 +639,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
638 setting the revalidate time to zero */ 639 setting the revalidate time to zero */
639 CIFS_I(file->f_path.dentry->d_inode)->time = 0; 640 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
640 641
641 retval = cifs_revalidate(file->f_path.dentry); 642 retval = cifs_revalidate_file(file);
642 if (retval < 0) 643 if (retval < 0)
643 return (loff_t)retval; 644 return (loff_t)retval;
644 } 645 }
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 78c1b86d55f6..7aa57ecdc437 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,7 +61,8 @@ extern int cifs_mkdir(struct inode *, struct dentry *, int);
61extern int cifs_rmdir(struct inode *, struct dentry *); 61extern int cifs_rmdir(struct inode *, struct dentry *);
62extern int cifs_rename(struct inode *, struct dentry *, struct inode *, 62extern int cifs_rename(struct inode *, struct dentry *, struct inode *,
63 struct dentry *); 63 struct dentry *);
64extern int cifs_revalidate(struct dentry *); 64extern int cifs_revalidate_file(struct file *filp);
65extern int cifs_revalidate_dentry(struct dentry *);
65extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 66extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
66extern int cifs_setattr(struct dentry *, struct iattr *); 67extern int cifs_setattr(struct dentry *, struct iattr *);
67 68
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a1c817eb291a..63c89d1d70b5 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -389,6 +389,7 @@ struct cifsInodeInfo {
389 bool clientCanCacheRead:1; /* read oplock */ 389 bool clientCanCacheRead:1; /* read oplock */
390 bool clientCanCacheAll:1; /* read and writebehind oplock */ 390 bool clientCanCacheAll:1; /* read and writebehind oplock */
391 bool delete_pending:1; /* DELETE_ON_CLOSE is set */ 391 bool delete_pending:1; /* DELETE_ON_CLOSE is set */
392 bool invalid_mapping:1; /* pagecache is invalid */
392 u64 server_eof; /* current file size on server */ 393 u64 server_eof; /* current file size on server */
393 u64 uniqueid; /* server inode number */ 394 u64 uniqueid; /* server inode number */
394 struct inode vfs_inode; 395 struct inode vfs_inode;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 88e2bc44ac58..39e47f46dea5 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,10 +104,12 @@ extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
104extern struct inode *cifs_iget(struct super_block *sb, 104extern struct inode *cifs_iget(struct super_block *sb,
105 struct cifs_fattr *fattr); 105 struct cifs_fattr *fattr);
106 106
107extern int cifs_get_file_info(struct file *filp);
107extern int cifs_get_inode_info(struct inode **pinode, 108extern int cifs_get_inode_info(struct inode **pinode,
108 const unsigned char *search_path, 109 const unsigned char *search_path,
109 FILE_ALL_INFO *pfile_info, 110 FILE_ALL_INFO *pfile_info,
110 struct super_block *sb, int xid, const __u16 *pfid); 111 struct super_block *sb, int xid, const __u16 *pfid);
112extern int cifs_get_file_info_unix(struct file *filp);
111extern int cifs_get_inode_info_unix(struct inode **pinode, 113extern int cifs_get_inode_info_unix(struct inode **pinode,
112 const unsigned char *search_path, 114 const unsigned char *search_path,
113 struct super_block *sb, int xid); 115 struct super_block *sb, int xid);
@@ -142,6 +144,8 @@ extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
142extern int CIFSFindClose(const int, struct cifsTconInfo *tcon, 144extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
143 const __u16 search_handle); 145 const __u16 search_handle);
144 146
147extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
148 u16 netfid, FILE_ALL_INFO *pFindData);
145extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, 149extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
146 const unsigned char *searchName, 150 const unsigned char *searchName,
147 FILE_ALL_INFO *findData, 151 FILE_ALL_INFO *findData,
@@ -152,6 +156,8 @@ extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
152 FILE_ALL_INFO *findData, 156 FILE_ALL_INFO *findData,
153 const struct nls_table *nls_codepage, int remap); 157 const struct nls_table *nls_codepage, int remap);
154 158
159extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
160 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
155extern int CIFSSMBUnixQPathInfo(const int xid, 161extern int CIFSSMBUnixQPathInfo(const int xid,
156 struct cifsTconInfo *tcon, 162 struct cifsTconInfo *tcon,
157 const unsigned char *searchName, 163 const unsigned char *searchName,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 611835899844..7cc7f83e9314 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -500,7 +500,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
500 } else if (pSMBr->hdr.WordCount == 13) { 500 } else if (pSMBr->hdr.WordCount == 13) {
501 cERROR(1, ("mount failed, cifs module not built " 501 cERROR(1, ("mount failed, cifs module not built "
502 "with CIFS_WEAK_PW_HASH support")); 502 "with CIFS_WEAK_PW_HASH support"));
503 rc = -EOPNOTSUPP; 503 rc = -EOPNOTSUPP;
504#endif /* WEAK_PW_HASH */ 504#endif /* WEAK_PW_HASH */
505 goto neg_err_exit; 505 goto neg_err_exit;
506 } else if (pSMBr->hdr.WordCount != 17) { 506 } else if (pSMBr->hdr.WordCount != 17) {
@@ -3230,8 +3230,72 @@ QInfRetry:
3230 return rc; 3230 return rc;
3231} 3231}
3232 3232
3233int
3234CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
3235 u16 netfid, FILE_ALL_INFO *pFindData)
3236{
3237 struct smb_t2_qfi_req *pSMB = NULL;
3238 struct smb_t2_qfi_rsp *pSMBr = NULL;
3239 int rc = 0;
3240 int bytes_returned;
3241 __u16 params, byte_count;
3233 3242
3243QFileInfoRetry:
3244 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
3245 (void **) &pSMBr);
3246 if (rc)
3247 return rc;
3234 3248
3249 params = 2 /* level */ + 2 /* fid */;
3250 pSMB->t2.TotalDataCount = 0;
3251 pSMB->t2.MaxParameterCount = cpu_to_le16(4);
3252 /* BB find exact max data count below from sess structure BB */
3253 pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
3254 pSMB->t2.MaxSetupCount = 0;
3255 pSMB->t2.Reserved = 0;
3256 pSMB->t2.Flags = 0;
3257 pSMB->t2.Timeout = 0;
3258 pSMB->t2.Reserved2 = 0;
3259 pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
3260 Fid) - 4);
3261 pSMB->t2.DataCount = 0;
3262 pSMB->t2.DataOffset = 0;
3263 pSMB->t2.SetupCount = 1;
3264 pSMB->t2.Reserved3 = 0;
3265 pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
3266 byte_count = params + 1 /* pad */ ;
3267 pSMB->t2.TotalParameterCount = cpu_to_le16(params);
3268 pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
3269 pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
3270 pSMB->Pad = 0;
3271 pSMB->Fid = netfid;
3272 pSMB->hdr.smb_buf_length += byte_count;
3273
3274 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
3275 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
3276 if (rc) {
3277 cFYI(1, ("Send error in QPathInfo = %d", rc));
3278 } else { /* decode response */
3279 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
3280
3281 if (rc) /* BB add auto retry on EOPNOTSUPP? */
3282 rc = -EIO;
3283 else if (pSMBr->ByteCount < 40)
3284 rc = -EIO; /* bad smb */
3285 else if (pFindData) {
3286 __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
3287 memcpy((char *) pFindData,
3288 (char *) &pSMBr->hdr.Protocol +
3289 data_offset, sizeof(FILE_ALL_INFO));
3290 } else
3291 rc = -ENOMEM;
3292 }
3293 cifs_buf_release(pSMB);
3294 if (rc == -EAGAIN)
3295 goto QFileInfoRetry;
3296
3297 return rc;
3298}
3235 3299
3236int 3300int
3237CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, 3301CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
@@ -3335,6 +3399,75 @@ QPathInfoRetry:
3335} 3399}
3336 3400
3337int 3401int
3402CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
3403 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
3404{
3405 struct smb_t2_qfi_req *pSMB = NULL;
3406 struct smb_t2_qfi_rsp *pSMBr = NULL;
3407 int rc = 0;
3408 int bytes_returned;
3409 __u16 params, byte_count;
3410
3411UnixQFileInfoRetry:
3412 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
3413 (void **) &pSMBr);
3414 if (rc)
3415 return rc;
3416
3417 params = 2 /* level */ + 2 /* fid */;
3418 pSMB->t2.TotalDataCount = 0;
3419 pSMB->t2.MaxParameterCount = cpu_to_le16(4);
3420 /* BB find exact max data count below from sess structure BB */
3421 pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
3422 pSMB->t2.MaxSetupCount = 0;
3423 pSMB->t2.Reserved = 0;
3424 pSMB->t2.Flags = 0;
3425 pSMB->t2.Timeout = 0;
3426 pSMB->t2.Reserved2 = 0;
3427 pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req,
3428 Fid) - 4);
3429 pSMB->t2.DataCount = 0;
3430 pSMB->t2.DataOffset = 0;
3431 pSMB->t2.SetupCount = 1;
3432 pSMB->t2.Reserved3 = 0;
3433 pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
3434 byte_count = params + 1 /* pad */ ;
3435 pSMB->t2.TotalParameterCount = cpu_to_le16(params);
3436 pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount;
3437 pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC);
3438 pSMB->Pad = 0;
3439 pSMB->Fid = netfid;
3440 pSMB->hdr.smb_buf_length += byte_count;
3441
3442 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
3443 (struct smb_hdr *) pSMBr, &bytes_returned, 0);
3444 if (rc) {
3445 cFYI(1, ("Send error in QPathInfo = %d", rc));
3446 } else { /* decode response */
3447 rc = validate_t2((struct smb_t2_rsp *)pSMBr);
3448
3449 if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) {
3450 cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n"
3451 "Unix Extensions can be disabled on mount "
3452 "by specifying the nosfu mount option."));
3453 rc = -EIO; /* bad smb */
3454 } else {
3455 __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
3456 memcpy((char *) pFindData,
3457 (char *) &pSMBr->hdr.Protocol +
3458 data_offset,
3459 sizeof(FILE_UNIX_BASIC_INFO));
3460 }
3461 }
3462
3463 cifs_buf_release(pSMB);
3464 if (rc == -EAGAIN)
3465 goto UnixQFileInfoRetry;
3466
3467 return rc;
3468}
3469
3470int
3338CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, 3471CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
3339 const unsigned char *searchName, 3472 const unsigned char *searchName,
3340 FILE_UNIX_BASIC_INFO *pFindData, 3473 FILE_UNIX_BASIC_INFO *pFindData,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 6ccf7262d1b7..e9f7ecc2714b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -739,7 +739,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
739 int isValid = 1; 739 int isValid = 1;
740 740
741 if (direntry->d_inode) { 741 if (direntry->d_inode) {
742 if (cifs_revalidate(direntry)) 742 if (cifs_revalidate_dentry(direntry))
743 return 0; 743 return 0;
744 } else { 744 } else {
745 cFYI(1, ("neg dentry 0x%p name = %s", 745 cFYI(1, ("neg dentry 0x%p name = %s",
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 3d8f8a96f5a3..ca2ba7a0193c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -219,8 +219,8 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
219 cFYI(1, ("inode unchanged on server")); 219 cFYI(1, ("inode unchanged on server"));
220 } else { 220 } else {
221 if (file->f_path.dentry->d_inode->i_mapping) { 221 if (file->f_path.dentry->d_inode->i_mapping) {
222 /* BB no need to lock inode until after invalidate 222 /* BB no need to lock inode until after invalidate
223 since namei code should already have it locked? */ 223 since namei code should already have it locked? */
224 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); 224 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
225 if (rc != 0) 225 if (rc != 0)
226 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; 226 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
@@ -1890,11 +1890,10 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1890 1890
1891int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) 1891int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1892{ 1892{
1893 struct dentry *dentry = file->f_path.dentry;
1894 int rc, xid; 1893 int rc, xid;
1895 1894
1896 xid = GetXid(); 1895 xid = GetXid();
1897 rc = cifs_revalidate(dentry); 1896 rc = cifs_revalidate_file(file);
1898 if (rc) { 1897 if (rc) {
1899 cFYI(1, ("Validation prior to mmap failed, error=%d", rc)); 1898 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1900 FreeXid(xid); 1899 FreeXid(xid);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 8bdbc818164c..723daaccbd0e 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -77,6 +77,41 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
77 } 77 }
78} 78}
79 79
80/* check inode attributes against fattr. If they don't match, tag the
81 * inode for cache invalidation
82 */
83static void
84cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
85{
86 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
87
88 cFYI(1, ("%s: revalidating inode %llu", __func__, cifs_i->uniqueid));
89
90 if (inode->i_state & I_NEW) {
91 cFYI(1, ("%s: inode %llu is new", __func__, cifs_i->uniqueid));
92 return;
93 }
94
95 /* don't bother with revalidation if we have an oplock */
96 if (cifs_i->clientCanCacheRead) {
97 cFYI(1, ("%s: inode %llu is oplocked", __func__,
98 cifs_i->uniqueid));
99 return;
100 }
101
102 /* revalidate if mtime or size have changed */
103 if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) &&
104 cifs_i->server_eof == fattr->cf_eof) {
105 cFYI(1, ("%s: inode %llu is unchanged", __func__,
106 cifs_i->uniqueid));
107 return;
108 }
109
110 cFYI(1, ("%s: invalidating inode %llu mapping", __func__,
111 cifs_i->uniqueid));
112 cifs_i->invalid_mapping = true;
113}
114
80/* populate an inode with info from a cifs_fattr struct */ 115/* populate an inode with info from a cifs_fattr struct */
81void 116void
82cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) 117cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
@@ -85,6 +120,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
85 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 120 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
86 unsigned long oldtime = cifs_i->time; 121 unsigned long oldtime = cifs_i->time;
87 122
123 cifs_revalidate_cache(inode, fattr);
124
88 inode->i_atime = fattr->cf_atime; 125 inode->i_atime = fattr->cf_atime;
89 inode->i_mtime = fattr->cf_mtime; 126 inode->i_mtime = fattr->cf_mtime;
90 inode->i_ctime = fattr->cf_ctime; 127 inode->i_ctime = fattr->cf_ctime;
@@ -231,6 +268,31 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
231 fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL; 268 fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
232} 269}
233 270
271int cifs_get_file_info_unix(struct file *filp)
272{
273 int rc;
274 int xid;
275 FILE_UNIX_BASIC_INFO find_data;
276 struct cifs_fattr fattr;
277 struct inode *inode = filp->f_path.dentry->d_inode;
278 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
279 struct cifsTconInfo *tcon = cifs_sb->tcon;
280 struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
281
282 xid = GetXid();
283 rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
284 if (!rc) {
285 cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
286 } else if (rc == -EREMOTE) {
287 cifs_create_dfs_fattr(&fattr, inode->i_sb);
288 rc = 0;
289 }
290
291 cifs_fattr_to_inode(inode, &fattr);
292 FreeXid(xid);
293 return rc;
294}
295
234int cifs_get_inode_info_unix(struct inode **pinode, 296int cifs_get_inode_info_unix(struct inode **pinode,
235 const unsigned char *full_path, 297 const unsigned char *full_path,
236 struct super_block *sb, int xid) 298 struct super_block *sb, int xid)
@@ -432,6 +494,47 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
432 fattr->cf_gid = cifs_sb->mnt_gid; 494 fattr->cf_gid = cifs_sb->mnt_gid;
433} 495}
434 496
497int cifs_get_file_info(struct file *filp)
498{
499 int rc;
500 int xid;
501 FILE_ALL_INFO find_data;
502 struct cifs_fattr fattr;
503 struct inode *inode = filp->f_path.dentry->d_inode;
504 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
505 struct cifsTconInfo *tcon = cifs_sb->tcon;
506 struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
507
508 xid = GetXid();
509 rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
510 if (rc == -EOPNOTSUPP || rc == -EINVAL) {
511 /*
512 * FIXME: legacy server -- fall back to path-based call?
513 * for now, just skip revalidating and mark inode for
514 * immediate reval.
515 */
516 rc = 0;
517 CIFS_I(inode)->time = 0;
518 goto cgfi_exit;
519 } else if (rc == -EREMOTE) {
520 cifs_create_dfs_fattr(&fattr, inode->i_sb);
521 rc = 0;
522 } else if (rc)
523 goto cgfi_exit;
524
525 /*
526 * don't bother with SFU junk here -- just mark inode as needing
527 * revalidation.
528 */
529 cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
530 fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
531 fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
532 cifs_fattr_to_inode(inode, &fattr);
533cgfi_exit:
534 FreeXid(xid);
535 return rc;
536}
537
435int cifs_get_inode_info(struct inode **pinode, 538int cifs_get_inode_info(struct inode **pinode,
436 const unsigned char *full_path, FILE_ALL_INFO *pfindData, 539 const unsigned char *full_path, FILE_ALL_INFO *pfindData,
437 struct super_block *sb, int xid, const __u16 *pfid) 540 struct super_block *sb, int xid, const __u16 *pfid)
@@ -1389,135 +1492,103 @@ cifs_rename_exit:
1389 return rc; 1492 return rc;
1390} 1493}
1391 1494
1392int cifs_revalidate(struct dentry *direntry) 1495static bool
1496cifs_inode_needs_reval(struct inode *inode)
1393{ 1497{
1394 int xid; 1498 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
1395 int rc = 0, wbrc = 0;
1396 char *full_path;
1397 struct cifs_sb_info *cifs_sb;
1398 struct cifsInodeInfo *cifsInode;
1399 loff_t local_size;
1400 struct timespec local_mtime;
1401 bool invalidate_inode = false;
1402 1499
1403 if (direntry->d_inode == NULL) 1500 if (cifs_i->clientCanCacheRead)
1404 return -ENOENT; 1501 return false;
1405 1502
1406 cifsInode = CIFS_I(direntry->d_inode); 1503 if (!lookupCacheEnabled)
1504 return true;
1407 1505
1408 if (cifsInode == NULL) 1506 if (cifs_i->time == 0)
1409 return -ENOENT; 1507 return true;
1410 1508
1411 /* no sense revalidating inode info on file that no one can write */ 1509 /* FIXME: the actimeo should be tunable */
1412 if (CIFS_I(direntry->d_inode)->clientCanCacheRead) 1510 if (time_after_eq(jiffies, cifs_i->time + HZ))
1413 return rc; 1511 return true;
1512
1513 return false;
1514}
1515
1516/* check invalid_mapping flag and zap the cache if it's set */
1517static void
1518cifs_invalidate_mapping(struct inode *inode)
1519{
1520 int rc;
1521 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
1522
1523 cifs_i->invalid_mapping = false;
1524
1525 /* write back any cached data */
1526 if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
1527 rc = filemap_write_and_wait(inode->i_mapping);
1528 if (rc)
1529 cifs_i->write_behind_rc = rc;
1530 }
1531 invalidate_remote_inode(inode);
1532}
1533
1534int cifs_revalidate_file(struct file *filp)
1535{
1536 int rc = 0;
1537 struct inode *inode = filp->f_path.dentry->d_inode;
1538
1539 if (!cifs_inode_needs_reval(inode))
1540 goto check_inval;
1541
1542 if (CIFS_SB(inode->i_sb)->tcon->unix_ext)
1543 rc = cifs_get_file_info_unix(filp);
1544 else
1545 rc = cifs_get_file_info(filp);
1546
1547check_inval:
1548 if (CIFS_I(inode)->invalid_mapping)
1549 cifs_invalidate_mapping(inode);
1550
1551 return rc;
1552}
1553
1554/* revalidate a dentry's inode attributes */
1555int cifs_revalidate_dentry(struct dentry *dentry)
1556{
1557 int xid;
1558 int rc = 0;
1559 char *full_path = NULL;
1560 struct inode *inode = dentry->d_inode;
1561 struct super_block *sb = dentry->d_sb;
1562
1563 if (inode == NULL)
1564 return -ENOENT;
1414 1565
1415 xid = GetXid(); 1566 xid = GetXid();
1416 1567
1417 cifs_sb = CIFS_SB(direntry->d_sb); 1568 if (!cifs_inode_needs_reval(inode))
1569 goto check_inval;
1418 1570
1419 /* can not safely grab the rename sem here if rename calls revalidate 1571 /* can not safely grab the rename sem here if rename calls revalidate
1420 since that would deadlock */ 1572 since that would deadlock */
1421 full_path = build_path_from_dentry(direntry); 1573 full_path = build_path_from_dentry(dentry);
1422 if (full_path == NULL) { 1574 if (full_path == NULL) {
1423 rc = -ENOMEM; 1575 rc = -ENOMEM;
1424 FreeXid(xid); 1576 goto check_inval;
1425 return rc;
1426 }
1427 cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
1428 "jiffies %ld", full_path, direntry->d_inode,
1429 direntry->d_inode->i_count.counter, direntry,
1430 direntry->d_time, jiffies));
1431
1432 if (cifsInode->time == 0) {
1433 /* was set to zero previously to force revalidate */
1434 } else if (time_before(jiffies, cifsInode->time + HZ) &&
1435 lookupCacheEnabled) {
1436 if ((S_ISREG(direntry->d_inode->i_mode) == 0) ||
1437 (direntry->d_inode->i_nlink == 1)) {
1438 kfree(full_path);
1439 FreeXid(xid);
1440 return rc;
1441 } else {
1442 cFYI(1, ("Have to revalidate file due to hardlinks"));
1443 }
1444 }
1445
1446 /* save mtime and size */
1447 local_mtime = direntry->d_inode->i_mtime;
1448 local_size = direntry->d_inode->i_size;
1449
1450 if (cifs_sb->tcon->unix_ext) {
1451 rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
1452 direntry->d_sb, xid);
1453 if (rc) {
1454 cFYI(1, ("error on getting revalidate info %d", rc));
1455/* if (rc != -ENOENT)
1456 rc = 0; */ /* BB should we cache info on
1457 certain errors? */
1458 }
1459 } else {
1460 rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
1461 direntry->d_sb, xid, NULL);
1462 if (rc) {
1463 cFYI(1, ("error on getting revalidate info %d", rc));
1464/* if (rc != -ENOENT)
1465 rc = 0; */ /* BB should we cache info on
1466 certain errors? */
1467 }
1468 } 1577 }
1469 /* should we remap certain errors, access denied?, to zero */
1470 1578
1471 /* if not oplocked, we invalidate inode pages if mtime or file size 1579 cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
1472 had changed on server */ 1580 "jiffies %ld", full_path, inode, inode->i_count.counter,
1581 dentry, dentry->d_time, jiffies));
1473 1582
1474 if (timespec_equal(&local_mtime, &direntry->d_inode->i_mtime) && 1583 if (CIFS_SB(sb)->tcon->unix_ext)
1475 (local_size == direntry->d_inode->i_size)) { 1584 rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
1476 cFYI(1, ("cifs_revalidate - inode unchanged")); 1585 else
1477 } else { 1586 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
1478 /* file may have changed on server */ 1587 xid, NULL);
1479 if (cifsInode->clientCanCacheRead) {
1480 /* no need to invalidate inode pages since we were the
1481 only ones who could have modified the file and the
1482 server copy is staler than ours */
1483 } else {
1484 invalidate_inode = true;
1485 }
1486 }
1487 1588
1488 /* can not grab this sem since kernel filesys locking documentation 1589check_inval:
1489 indicates i_mutex may be taken by the kernel on lookup and rename 1590 if (CIFS_I(inode)->invalid_mapping)
1490 which could deadlock if we grab the i_mutex here as well */ 1591 cifs_invalidate_mapping(inode);
1491/* mutex_lock(&direntry->d_inode->i_mutex);*/
1492 /* need to write out dirty pages here */
1493 if (direntry->d_inode->i_mapping) {
1494 /* do we need to lock inode until after invalidate completes
1495 below? */
1496 wbrc = filemap_fdatawrite(direntry->d_inode->i_mapping);
1497 if (wbrc)
1498 CIFS_I(direntry->d_inode)->write_behind_rc = wbrc;
1499 }
1500 if (invalidate_inode) {
1501 /* shrink_dcache not necessary now that cifs dentry ops
1502 are exported for negative dentries */
1503/* if (S_ISDIR(direntry->d_inode->i_mode))
1504 shrink_dcache_parent(direntry); */
1505 if (S_ISREG(direntry->d_inode->i_mode)) {
1506 if (direntry->d_inode->i_mapping) {
1507 wbrc = filemap_fdatawait(direntry->d_inode->i_mapping);
1508 if (wbrc)
1509 CIFS_I(direntry->d_inode)->write_behind_rc = wbrc;
1510 }
1511 /* may eventually have to do this for open files too */
1512 if (list_empty(&(cifsInode->openFileList))) {
1513 /* changed on server - flush read ahead pages */
1514 cFYI(1, ("Invalidating read ahead data on "
1515 "closed file"));
1516 invalidate_remote_inode(direntry->d_inode);
1517 }
1518 }
1519 }
1520/* mutex_unlock(&direntry->d_inode->i_mutex); */
1521 1592
1522 kfree(full_path); 1593 kfree(full_path);
1523 FreeXid(xid); 1594 FreeXid(xid);
@@ -1527,7 +1598,7 @@ int cifs_revalidate(struct dentry *direntry)
1527int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1598int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1528 struct kstat *stat) 1599 struct kstat *stat)
1529{ 1600{
1530 int err = cifs_revalidate(dentry); 1601 int err = cifs_revalidate_dentry(dentry);
1531 if (!err) { 1602 if (!err) {
1532 generic_fillattr(dentry->d_inode, stat); 1603 generic_fillattr(dentry->d_inode, stat);
1533 stat->blksize = CIFS_MAX_MSGSIZE; 1604 stat->blksize = CIFS_MAX_MSGSIZE;
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index e22de8397b74..d32ee9412cb9 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -567,7 +567,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
567 else BUG(); 567 else BUG();
568 } 568 }
569 } 569 }
570 list->rb_node = NULL; 570 *list = RB_ROOT;
571} 571}
572 572
573static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) 573static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index db30c0b398b5..a2b8b4df125d 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -782,6 +782,7 @@ struct svc_version nfs4_callback_version1 = {
782 .vs_proc = nfs4_callback_procedures1, 782 .vs_proc = nfs4_callback_procedures1,
783 .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, 783 .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
784 .vs_dispatch = NULL, 784 .vs_dispatch = NULL,
785 .vs_hidden = 1,
785}; 786};
786 787
787struct svc_version nfs4_callback_version4 = { 788struct svc_version nfs4_callback_version4 = {
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 944b627ec6e1..69e7b8140122 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -71,4 +71,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
71} 71}
72#endif 72#endif
73 73
74static inline int nfs_have_delegated_attributes(struct inode *inode)
75{
76 return nfs_have_delegation(inode, FMODE_READ) &&
77 !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
78}
79
74#endif 80#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a1f6b4438fb1..c6f2750648f4 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1789,7 +1789,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
1789 cache = nfs_access_search_rbtree(inode, cred); 1789 cache = nfs_access_search_rbtree(inode, cred);
1790 if (cache == NULL) 1790 if (cache == NULL)
1791 goto out; 1791 goto out;
1792 if (!nfs_have_delegation(inode, FMODE_READ) && 1792 if (!nfs_have_delegated_attributes(inode) &&
1793 !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) 1793 !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
1794 goto out_stale; 1794 goto out_stale;
1795 res->jiffies = cache->jiffies; 1795 res->jiffies = cache->jiffies;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 657201acda84..e358df75a6ad 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -729,7 +729,7 @@ int nfs_attribute_timeout(struct inode *inode)
729{ 729{
730 struct nfs_inode *nfsi = NFS_I(inode); 730 struct nfs_inode *nfsi = NFS_I(inode);
731 731
732 if (nfs_have_delegation(inode, FMODE_READ)) 732 if (nfs_have_delegated_attributes(inode))
733 return 0; 733 return 0;
734 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); 734 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
735} 735}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index eda74c42d552..f9254fb0c9d0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5107,6 +5107,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
5107 res = kzalloc(sizeof(*res), GFP_KERNEL); 5107 res = kzalloc(sizeof(*res), GFP_KERNEL);
5108 if (!args || !res) { 5108 if (!args || !res) {
5109 kfree(args); 5109 kfree(args);
5110 kfree(res);
5110 nfs_put_client(clp); 5111 nfs_put_client(clp);
5111 return -ENOMEM; 5112 return -ENOMEM;
5112 } 5113 }
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index a12c45b65dd4..29d9d36cd5f4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req)
112 */ 112 */
113int nfs_set_page_tag_locked(struct nfs_page *req) 113int nfs_set_page_tag_locked(struct nfs_page *req)
114{ 114{
115 struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
116
117 if (!nfs_lock_request_dontget(req)) 115 if (!nfs_lock_request_dontget(req))
118 return 0; 116 return 0;
119 if (req->wb_page != NULL) 117 if (req->wb_page != NULL)
120 radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 118 radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
121 return 1; 119 return 1;
122} 120}
123 121
@@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
126 */ 124 */
127void nfs_clear_page_tag_locked(struct nfs_page *req) 125void nfs_clear_page_tag_locked(struct nfs_page *req)
128{ 126{
129 struct inode *inode = req->wb_context->path.dentry->d_inode;
130 struct nfs_inode *nfsi = NFS_I(inode);
131
132 if (req->wb_page != NULL) { 127 if (req->wb_page != NULL) {
128 struct inode *inode = req->wb_context->path.dentry->d_inode;
129 struct nfs_inode *nfsi = NFS_I(inode);
130
133 spin_lock(&inode->i_lock); 131 spin_lock(&inode->i_lock);
134 radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 132 radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
135 nfs_unlock_request(req); 133 nfs_unlock_request(req);
@@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
142 * nfs_clear_request - Free up all resources allocated to the request 140 * nfs_clear_request - Free up all resources allocated to the request
143 * @req: 141 * @req:
144 * 142 *
145 * Release page resources associated with a write request after it 143 * Release page and open context resources associated with a read/write
146 * has completed. 144 * request after it has completed.
147 */ 145 */
148void nfs_clear_request(struct nfs_page *req) 146void nfs_clear_request(struct nfs_page *req)
149{ 147{
150 struct page *page = req->wb_page; 148 struct page *page = req->wb_page;
149 struct nfs_open_context *ctx = req->wb_context;
150
151 if (page != NULL) { 151 if (page != NULL) {
152 page_cache_release(page); 152 page_cache_release(page);
153 req->wb_page = NULL; 153 req->wb_page = NULL;
154 } 154 }
155 if (ctx != NULL) {
156 put_nfs_open_context(ctx);
157 req->wb_context = NULL;
158 }
155} 159}
156 160
157 161
@@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref)
165{ 169{
166 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 170 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
167 171
168 /* Release struct file or cached credential */ 172 /* Release struct file and open context */
169 nfs_clear_request(req); 173 nfs_clear_request(req);
170 put_nfs_open_context(req->wb_context);
171 nfs_page_free(req); 174 nfs_page_free(req);
172} 175}
173 176
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1afee4eea77..6baf9a393466 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2214,7 +2214,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
2214 } else { 2214 } else {
2215 error = nfs_bdi_register(server); 2215 error = nfs_bdi_register(server);
2216 if (error) 2216 if (error)
2217 goto error_splat_super; 2217 goto error_splat_bdi;
2218 } 2218 }
2219 2219
2220 if (!s->s_root) { 2220 if (!s->s_root) {
@@ -2256,6 +2256,9 @@ out_err_nosb:
2256error_splat_root: 2256error_splat_root:
2257 dput(mntroot); 2257 dput(mntroot);
2258error_splat_super: 2258error_splat_super:
2259 if (server && !s->s_root)
2260 bdi_unregister(&server->backing_dev_info);
2261error_splat_bdi:
2259 deactivate_locked_super(s); 2262 deactivate_locked_super(s);
2260 goto out; 2263 goto out;
2261} 2264}
@@ -2326,7 +2329,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
2326 } else { 2329 } else {
2327 error = nfs_bdi_register(server); 2330 error = nfs_bdi_register(server);
2328 if (error) 2331 if (error)
2329 goto error_splat_super; 2332 goto error_splat_bdi;
2330 } 2333 }
2331 2334
2332 if (!s->s_root) { 2335 if (!s->s_root) {
@@ -2363,6 +2366,9 @@ out_err_noserver:
2363 return error; 2366 return error;
2364 2367
2365error_splat_super: 2368error_splat_super:
2369 if (server && !s->s_root)
2370 bdi_unregister(&server->backing_dev_info);
2371error_splat_bdi:
2366 deactivate_locked_super(s); 2372 deactivate_locked_super(s);
2367 dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error); 2373 dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error);
2368 return error; 2374 return error;
@@ -2578,7 +2584,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type,
2578 } else { 2584 } else {
2579 error = nfs_bdi_register(server); 2585 error = nfs_bdi_register(server);
2580 if (error) 2586 if (error)
2581 goto error_splat_super; 2587 goto error_splat_bdi;
2582 } 2588 }
2583 2589
2584 if (!s->s_root) { 2590 if (!s->s_root) {
@@ -2616,6 +2622,9 @@ out_free:
2616error_splat_root: 2622error_splat_root:
2617 dput(mntroot); 2623 dput(mntroot);
2618error_splat_super: 2624error_splat_super:
2625 if (server && !s->s_root)
2626 bdi_unregister(&server->backing_dev_info);
2627error_splat_bdi:
2619 deactivate_locked_super(s); 2628 deactivate_locked_super(s);
2620 goto out; 2629 goto out;
2621} 2630}
@@ -2811,7 +2820,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
2811 } else { 2820 } else {
2812 error = nfs_bdi_register(server); 2821 error = nfs_bdi_register(server);
2813 if (error) 2822 if (error)
2814 goto error_splat_super; 2823 goto error_splat_bdi;
2815 } 2824 }
2816 2825
2817 if (!s->s_root) { 2826 if (!s->s_root) {
@@ -2847,6 +2856,9 @@ out_err_noserver:
2847 return error; 2856 return error;
2848 2857
2849error_splat_super: 2858error_splat_super:
2859 if (server && !s->s_root)
2860 bdi_unregister(&server->backing_dev_info);
2861error_splat_bdi:
2850 deactivate_locked_super(s); 2862 deactivate_locked_super(s);
2851 dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error); 2863 dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error);
2852 return error; 2864 return error;
@@ -2893,7 +2905,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
2893 } else { 2905 } else {
2894 error = nfs_bdi_register(server); 2906 error = nfs_bdi_register(server);
2895 if (error) 2907 if (error)
2896 goto error_splat_super; 2908 goto error_splat_bdi;
2897 } 2909 }
2898 2910
2899 if (!s->s_root) { 2911 if (!s->s_root) {
@@ -2929,6 +2941,9 @@ out_err_noserver:
2929 return error; 2941 return error;
2930 2942
2931error_splat_super: 2943error_splat_super:
2944 if (server && !s->s_root)
2945 bdi_unregister(&server->backing_dev_info);
2946error_splat_bdi:
2932 deactivate_locked_super(s); 2947 deactivate_locked_super(s);
2933 dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error); 2948 dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
2934 return error; 2949 return error;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1cf39dfaee7a..0de1db6cddbf 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -31,6 +31,7 @@
31#include <linux/vfs.h> 31#include <linux/vfs.h>
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/smp_lock.h> 33#include <linux/smp_lock.h>
34#include <linux/bitmap.h>
34 35
35#include "sysctl.h" 36#include "sysctl.h"
36#include "logfile.h" 37#include "logfile.h"
@@ -2458,7 +2459,6 @@ static void ntfs_put_super(struct super_block *sb)
2458static s64 get_nr_free_clusters(ntfs_volume *vol) 2459static s64 get_nr_free_clusters(ntfs_volume *vol)
2459{ 2460{
2460 s64 nr_free = vol->nr_clusters; 2461 s64 nr_free = vol->nr_clusters;
2461 u32 *kaddr;
2462 struct address_space *mapping = vol->lcnbmp_ino->i_mapping; 2462 struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
2463 struct page *page; 2463 struct page *page;
2464 pgoff_t index, max_index; 2464 pgoff_t index, max_index;
@@ -2477,7 +2477,8 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2477 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2477 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
2478 max_index, PAGE_CACHE_SIZE / 4); 2478 max_index, PAGE_CACHE_SIZE / 4);
2479 for (index = 0; index < max_index; index++) { 2479 for (index = 0; index < max_index; index++) {
2480 unsigned int i; 2480 unsigned long *kaddr;
2481
2481 /* 2482 /*
2482 * Read the page from page cache, getting it from backing store 2483 * Read the page from page cache, getting it from backing store
2483 * if necessary, and increment the use count. 2484 * if necessary, and increment the use count.
@@ -2490,16 +2491,16 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2490 nr_free -= PAGE_CACHE_SIZE * 8; 2491 nr_free -= PAGE_CACHE_SIZE * 8;
2491 continue; 2492 continue;
2492 } 2493 }
2493 kaddr = (u32*)kmap_atomic(page, KM_USER0); 2494 kaddr = kmap_atomic(page, KM_USER0);
2494 /* 2495 /*
2495 * For each 4 bytes, subtract the number of set bits. If this 2496 * Subtract the number of set bits. If this
2496 * is the last page and it is partial we don't really care as 2497 * is the last page and it is partial we don't really care as
2497 * it just means we do a little extra work but it won't affect 2498 * it just means we do a little extra work but it won't affect
2498 * the result as all out of range bytes are set to zero by 2499 * the result as all out of range bytes are set to zero by
2499 * ntfs_readpage(). 2500 * ntfs_readpage().
2500 */ 2501 */
2501 for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) 2502 nr_free -= bitmap_weight(kaddr,
2502 nr_free -= (s64)hweight32(kaddr[i]); 2503 PAGE_CACHE_SIZE * BITS_PER_BYTE);
2503 kunmap_atomic(kaddr, KM_USER0); 2504 kunmap_atomic(kaddr, KM_USER0);
2504 page_cache_release(page); 2505 page_cache_release(page);
2505 } 2506 }
@@ -2538,7 +2539,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2538static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, 2539static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2539 s64 nr_free, const pgoff_t max_index) 2540 s64 nr_free, const pgoff_t max_index)
2540{ 2541{
2541 u32 *kaddr;
2542 struct address_space *mapping = vol->mftbmp_ino->i_mapping; 2542 struct address_space *mapping = vol->mftbmp_ino->i_mapping;
2543 struct page *page; 2543 struct page *page;
2544 pgoff_t index; 2544 pgoff_t index;
@@ -2548,7 +2548,8 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2548 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2548 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
2549 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); 2549 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
2550 for (index = 0; index < max_index; index++) { 2550 for (index = 0; index < max_index; index++) {
2551 unsigned int i; 2551 unsigned long *kaddr;
2552
2552 /* 2553 /*
2553 * Read the page from page cache, getting it from backing store 2554 * Read the page from page cache, getting it from backing store
2554 * if necessary, and increment the use count. 2555 * if necessary, and increment the use count.
@@ -2561,16 +2562,16 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2561 nr_free -= PAGE_CACHE_SIZE * 8; 2562 nr_free -= PAGE_CACHE_SIZE * 8;
2562 continue; 2563 continue;
2563 } 2564 }
2564 kaddr = (u32*)kmap_atomic(page, KM_USER0); 2565 kaddr = kmap_atomic(page, KM_USER0);
2565 /* 2566 /*
2566 * For each 4 bytes, subtract the number of set bits. If this 2567 * Subtract the number of set bits. If this
2567 * is the last page and it is partial we don't really care as 2568 * is the last page and it is partial we don't really care as
2568 * it just means we do a little extra work but it won't affect 2569 * it just means we do a little extra work but it won't affect
2569 * the result as all out of range bytes are set to zero by 2570 * the result as all out of range bytes are set to zero by
2570 * ntfs_readpage(). 2571 * ntfs_readpage().
2571 */ 2572 */
2572 for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) 2573 nr_free -= bitmap_weight(kaddr,
2573 nr_free -= (s64)hweight32(kaddr[i]); 2574 PAGE_CACHE_SIZE * BITS_PER_BYTE);
2574 kunmap_atomic(kaddr, KM_USER0); 2575 kunmap_atomic(kaddr, KM_USER0);
2575 page_cache_release(page); 2576 page_cache_release(page);
2576 } 2577 }
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 9083357f9e44..99628508cb11 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -932,6 +932,9 @@ xfs_aops_discard_page(
932 if (!xfs_is_delayed_page(page, IOMAP_DELAY)) 932 if (!xfs_is_delayed_page(page, IOMAP_DELAY))
933 goto out_invalidate; 933 goto out_invalidate;
934 934
935 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
936 goto out_invalidate;
937
935 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 938 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
936 "page discard on page %p, inode 0x%llx, offset %llu.", 939 "page discard on page %p, inode 0x%llx, offset %llu.",
937 page, ip->i_ino, offset); 940 page, ip->i_ino, offset);
@@ -964,8 +967,10 @@ xfs_aops_discard_page(
964 967
965 if (error) { 968 if (error) {
966 /* something screwed, just bail */ 969 /* something screwed, just bail */
967 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 970 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
968 "page discard failed delalloc mapping lookup."); 971 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
972 "page discard failed delalloc mapping lookup.");
973 }
969 break; 974 break;
970 } 975 }
971 if (!nimaps) { 976 if (!nimaps) {
@@ -991,8 +996,10 @@ xfs_aops_discard_page(
991 ASSERT(!flist.xbf_count && !flist.xbf_first); 996 ASSERT(!flist.xbf_count && !flist.xbf_first);
992 if (error) { 997 if (error) {
993 /* something screwed, just bail */ 998 /* something screwed, just bail */
994 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 999 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1000 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
995 "page discard unable to remove delalloc mapping."); 1001 "page discard unable to remove delalloc mapping.");
1002 }
996 break; 1003 break;
997 } 1004 }
998next_buffer: 1005next_buffer:
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6f76ba85f193..bd111b7e1daa 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -168,75 +168,6 @@ test_page_region(
168} 168}
169 169
170/* 170/*
171 * Mapping of multi-page buffers into contiguous virtual space
172 */
173
174typedef struct a_list {
175 void *vm_addr;
176 struct a_list *next;
177} a_list_t;
178
179static a_list_t *as_free_head;
180static int as_list_len;
181static DEFINE_SPINLOCK(as_lock);
182
183/*
184 * Try to batch vunmaps because they are costly.
185 */
186STATIC void
187free_address(
188 void *addr)
189{
190 a_list_t *aentry;
191
192#ifdef CONFIG_XEN
193 /*
194 * Xen needs to be able to make sure it can get an exclusive
195 * RO mapping of pages it wants to turn into a pagetable. If
196 * a newly allocated page is also still being vmap()ed by xfs,
197 * it will cause pagetable construction to fail. This is a
198 * quick workaround to always eagerly unmap pages so that Xen
199 * is happy.
200 */
201 vunmap(addr);
202 return;
203#endif
204
205 aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
206 if (likely(aentry)) {
207 spin_lock(&as_lock);
208 aentry->next = as_free_head;
209 aentry->vm_addr = addr;
210 as_free_head = aentry;
211 as_list_len++;
212 spin_unlock(&as_lock);
213 } else {
214 vunmap(addr);
215 }
216}
217
218STATIC void
219purge_addresses(void)
220{
221 a_list_t *aentry, *old;
222
223 if (as_free_head == NULL)
224 return;
225
226 spin_lock(&as_lock);
227 aentry = as_free_head;
228 as_free_head = NULL;
229 as_list_len = 0;
230 spin_unlock(&as_lock);
231
232 while ((old = aentry) != NULL) {
233 vunmap(aentry->vm_addr);
234 aentry = aentry->next;
235 kfree(old);
236 }
237}
238
239/*
240 * Internal xfs_buf_t object manipulation 171 * Internal xfs_buf_t object manipulation
241 */ 172 */
242 173
@@ -337,7 +268,8 @@ xfs_buf_free(
337 uint i; 268 uint i;
338 269
339 if (xfs_buf_is_vmapped(bp)) 270 if (xfs_buf_is_vmapped(bp))
340 free_address(bp->b_addr - bp->b_offset); 271 vm_unmap_ram(bp->b_addr - bp->b_offset,
272 bp->b_page_count);
341 273
342 for (i = 0; i < bp->b_page_count; i++) { 274 for (i = 0; i < bp->b_page_count; i++) {
343 struct page *page = bp->b_pages[i]; 275 struct page *page = bp->b_pages[i];
@@ -457,10 +389,8 @@ _xfs_buf_map_pages(
457 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 389 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
458 bp->b_flags |= XBF_MAPPED; 390 bp->b_flags |= XBF_MAPPED;
459 } else if (flags & XBF_MAPPED) { 391 } else if (flags & XBF_MAPPED) {
460 if (as_list_len > 64) 392 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
461 purge_addresses(); 393 -1, PAGE_KERNEL);
462 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
463 VM_MAP, PAGE_KERNEL);
464 if (unlikely(bp->b_addr == NULL)) 394 if (unlikely(bp->b_addr == NULL))
465 return -ENOMEM; 395 return -ENOMEM;
466 bp->b_addr += bp->b_offset; 396 bp->b_addr += bp->b_offset;
@@ -1955,9 +1885,6 @@ xfsbufd(
1955 xfs_buf_iostrategy(bp); 1885 xfs_buf_iostrategy(bp);
1956 count++; 1886 count++;
1957 } 1887 }
1958
1959 if (as_list_len > 0)
1960 purge_addresses();
1961 if (count) 1888 if (count)
1962 blk_run_address_space(target->bt_mapping); 1889 blk_run_address_space(target->bt_mapping);
1963 1890
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index 29c0448265cf..ca16c3801a1e 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -21,22 +21,21 @@
21#define CLCD_UBAS 0x00000010 21#define CLCD_UBAS 0x00000010
22#define CLCD_LBAS 0x00000014 22#define CLCD_LBAS 0x00000014
23 23
24#if !defined(CONFIG_ARCH_VERSATILE) && !defined(CONFIG_ARCH_REALVIEW) 24#define CLCD_PL110_IENB 0x00000018
25#define CLCD_IENB 0x00000018 25#define CLCD_PL110_CNTL 0x0000001c
26#define CLCD_CNTL 0x0000001c 26#define CLCD_PL110_STAT 0x00000020
27#else 27#define CLCD_PL110_INTR 0x00000024
28/* 28#define CLCD_PL110_UCUR 0x00000028
29 * Someone rearranged these two registers on the Versatile 29#define CLCD_PL110_LCUR 0x0000002C
30 * platform... 30
31 */ 31#define CLCD_PL111_CNTL 0x00000018
32#define CLCD_IENB 0x0000001c 32#define CLCD_PL111_IENB 0x0000001c
33#define CLCD_CNTL 0x00000018 33#define CLCD_PL111_RIS 0x00000020
34#endif 34#define CLCD_PL111_MIS 0x00000024
35 35#define CLCD_PL111_ICR 0x00000028
36#define CLCD_STAT 0x00000020 36#define CLCD_PL111_UCUR 0x0000002c
37#define CLCD_INTR 0x00000024 37#define CLCD_PL111_LCUR 0x00000030
38#define CLCD_UCUR 0x00000028 38
39#define CLCD_LCUR 0x0000002C
40#define CLCD_PALL 0x00000200 39#define CLCD_PALL 0x00000200
41#define CLCD_PALETTE 0x00000200 40#define CLCD_PALETTE 0x00000200
42 41
@@ -147,6 +146,8 @@ struct clcd_fb {
147 struct clcd_board *board; 146 struct clcd_board *board;
148 void *board_data; 147 void *board_data;
149 void __iomem *regs; 148 void __iomem *regs;
149 u16 off_ienb;
150 u16 off_cntl;
150 u32 clcd_cntl; 151 u32 clcd_cntl;
151 u32 cmap[16]; 152 u32 cmap[16];
152}; 153};
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 8c4f884db6b4..4a3d52e545e1 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -36,18 +36,18 @@ struct backlight_device;
36struct fb_info; 36struct fb_info;
37 37
38struct backlight_ops { 38struct backlight_ops {
39 const unsigned int options; 39 unsigned int options;
40 40
41#define BL_CORE_SUSPENDRESUME (1 << 0) 41#define BL_CORE_SUSPENDRESUME (1 << 0)
42 42
43 /* Notify the backlight driver some property has changed */ 43 /* Notify the backlight driver some property has changed */
44 int (* const update_status)(struct backlight_device *); 44 int (*update_status)(struct backlight_device *);
45 /* Return the current backlight brightness (accounting for power, 45 /* Return the current backlight brightness (accounting for power,
46 fb_blank etc.) */ 46 fb_blank etc.) */
47 int (* const get_brightness)(struct backlight_device *); 47 int (*get_brightness)(struct backlight_device *);
48 /* Check if given framebuffer device is the one bound to this backlight; 48 /* Check if given framebuffer device is the one bound to this backlight;
49 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ 49 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
50 int (* const check_fb)(struct fb_info *); 50 int (*check_fb)(struct backlight_device *, struct fb_info *);
51}; 51};
52 52
53/* This structure defines all the properties of a backlight */ 53/* This structure defines all the properties of a backlight */
@@ -103,7 +103,8 @@ static inline void backlight_update_status(struct backlight_device *bd)
103} 103}
104 104
105extern struct backlight_device *backlight_device_register(const char *name, 105extern struct backlight_device *backlight_device_register(const char *name,
106 struct device *dev, void *devdata, const struct backlight_ops *ops); 106 struct device *dev, void *devdata, const struct backlight_ops *ops,
107 const struct backlight_properties *props);
107extern void backlight_device_unregister(struct backlight_device *bd); 108extern void backlight_device_unregister(struct backlight_device *bd);
108extern void backlight_force_update(struct backlight_device *bd, 109extern void backlight_force_update(struct backlight_device *bd,
109 enum backlight_update_reason reason); 110 enum backlight_update_reason reason);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 6b7c444ab8f6..c0f4b364c711 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -131,12 +131,12 @@ struct ftrace_event_call {
131 void *mod; 131 void *mod;
132 void *data; 132 void *data;
133 133
134 int profile_count; 134 int perf_refcount;
135 int (*profile_enable)(struct ftrace_event_call *); 135 int (*perf_event_enable)(struct ftrace_event_call *);
136 void (*profile_disable)(struct ftrace_event_call *); 136 void (*perf_event_disable)(struct ftrace_event_call *);
137}; 137};
138 138
139#define FTRACE_MAX_PROFILE_SIZE 2048 139#define PERF_MAX_TRACE_SIZE 2048
140 140
141#define MAX_FILTER_PRED 32 141#define MAX_FILTER_PRED 32
142#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ 142#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
@@ -187,22 +187,25 @@ do { \
187 187
188#ifdef CONFIG_PERF_EVENTS 188#ifdef CONFIG_PERF_EVENTS
189struct perf_event; 189struct perf_event;
190extern int ftrace_profile_enable(int event_id); 190
191extern void ftrace_profile_disable(int event_id); 191DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
192
193extern int perf_trace_enable(int event_id);
194extern void perf_trace_disable(int event_id);
192extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, 195extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
193 char *filter_str); 196 char *filter_str);
194extern void ftrace_profile_free_filter(struct perf_event *event); 197extern void ftrace_profile_free_filter(struct perf_event *event);
195extern void * 198extern void *
196ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, 199perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
197 unsigned long *irq_flags); 200 unsigned long *irq_flags);
198 201
199static inline void 202static inline void
200ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, 203perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
201 u64 count, unsigned long irq_flags) 204 u64 count, unsigned long irq_flags, struct pt_regs *regs)
202{ 205{
203 struct trace_entry *entry = raw_data; 206 struct trace_entry *entry = raw_data;
204 207
205 perf_tp_event(entry->type, addr, count, raw_data, size); 208 perf_tp_event(entry->type, addr, count, raw_data, size, regs);
206 perf_swevent_put_recursion_context(rctx); 209 perf_swevent_put_recursion_context(rctx);
207 local_irq_restore(irq_flags); 210 local_irq_restore(irq_flags);
208} 211}
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 1822d635be6b..16b92d008bed 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -2,6 +2,7 @@
2#define _IF_TUNNEL_H_ 2#define _IF_TUNNEL_H_
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/byteorder.h>
5 6
6#ifdef __KERNEL__ 7#ifdef __KERNEL__
7#include <linux/ip.h> 8#include <linux/ip.h>
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 1adfe779eb99..85582e1bcee9 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -36,6 +36,8 @@ struct memory_block {
36 struct sys_device sysdev; 36 struct sys_device sysdev;
37}; 37};
38 38
39int arch_get_memory_phys_device(unsigned long start_pfn);
40
39/* These states are exposed to userspace as text strings in sysfs */ 41/* These states are exposed to userspace as text strings in sysfs */
40#define MEM_ONLINE (1<<0) /* exposed to userspace */ 42#define MEM_ONLINE (1<<0) /* exposed to userspace */
41#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ 43#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index d654873aa25a..1f7e300094cd 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -59,6 +59,7 @@
59enum nf_ip6_hook_priorities { 59enum nf_ip6_hook_priorities {
60 NF_IP6_PRI_FIRST = INT_MIN, 60 NF_IP6_PRI_FIRST = INT_MIN,
61 NF_IP6_PRI_CONNTRACK_DEFRAG = -400, 61 NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
62 NF_IP6_PRI_RAW = -300,
62 NF_IP6_PRI_SELINUX_FIRST = -225, 63 NF_IP6_PRI_SELINUX_FIRST = -225,
63 NF_IP6_PRI_CONNTRACK = -200, 64 NF_IP6_PRI_CONNTRACK = -200,
64 NF_IP6_PRI_MANGLE = -150, 65 NF_IP6_PRI_MANGLE = -150,
diff --git a/include/linux/of.h b/include/linux/of.h
index f6d9cbc39c9c..a367e19bb3af 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -127,7 +127,7 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
127 127
128/* Default string compare functions, Allow arch asm/prom.h to override */ 128/* Default string compare functions, Allow arch asm/prom.h to override */
129#if !defined(of_compat_cmp) 129#if !defined(of_compat_cmp)
130#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l)) 130#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
131#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 131#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
132#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 132#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
133#endif 133#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6f8cd7da1a01..95477038a72a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -452,6 +452,8 @@ enum perf_callchain_context {
452#include <linux/fs.h> 452#include <linux/fs.h>
453#include <linux/pid_namespace.h> 453#include <linux/pid_namespace.h>
454#include <linux/workqueue.h> 454#include <linux/workqueue.h>
455#include <linux/ftrace.h>
456#include <linux/cpu.h>
455#include <asm/atomic.h> 457#include <asm/atomic.h>
456 458
457#define PERF_MAX_STACK_DEPTH 255 459#define PERF_MAX_STACK_DEPTH 255
@@ -847,6 +849,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
847 __perf_sw_event(event_id, nr, nmi, regs, addr); 849 __perf_sw_event(event_id, nr, nmi, regs, addr);
848} 850}
849 851
852extern void
853perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
854
855/*
856 * Take a snapshot of the regs. Skip ip and frame pointer to
857 * the nth caller. We only need a few of the regs:
858 * - ip for PERF_SAMPLE_IP
859 * - cs for user_mode() tests
860 * - bp for callchains
861 * - eflags, for future purposes, just in case
862 */
863static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
864{
865 unsigned long ip;
866
867 memset(regs, 0, sizeof(*regs));
868
869 switch (skip) {
870 case 1 :
871 ip = CALLER_ADDR0;
872 break;
873 case 2 :
874 ip = CALLER_ADDR1;
875 break;
876 case 3 :
877 ip = CALLER_ADDR2;
878 break;
879 case 4:
880 ip = CALLER_ADDR3;
881 break;
882 /* No need to support further for now */
883 default:
884 ip = 0;
885 }
886
887 return perf_arch_fetch_caller_regs(regs, ip, skip);
888}
889
850extern void __perf_event_mmap(struct vm_area_struct *vma); 890extern void __perf_event_mmap(struct vm_area_struct *vma);
851 891
852static inline void perf_event_mmap(struct vm_area_struct *vma) 892static inline void perf_event_mmap(struct vm_area_struct *vma)
@@ -880,7 +920,8 @@ static inline bool perf_paranoid_kernel(void)
880} 920}
881 921
882extern void perf_event_init(void); 922extern void perf_event_init(void);
883extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); 923extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
924 int entry_size, struct pt_regs *regs);
884extern void perf_bp_event(struct perf_event *event, void *data); 925extern void perf_bp_event(struct perf_event *event, void *data);
885 926
886#ifndef perf_misc_flags 927#ifndef perf_misc_flags
@@ -936,5 +977,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
936#define perf_output_put(handle, x) \ 977#define perf_output_put(handle, x) \
937 perf_output_copy((handle), &(x), sizeof(x)) 978 perf_output_copy((handle), &(x), sizeof(x))
938 979
980/*
981 * This has to have a higher priority than migration_notifier in sched.c.
982 */
983#define perf_cpu_notifier(fn) \
984do { \
985 static struct notifier_block fn##_nb __cpuinitdata = \
986 { .notifier_call = fn, .priority = 20 }; \
987 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
988 (void *)(unsigned long)smp_processor_id()); \
989 fn(&fn##_nb, (unsigned long)CPU_STARTING, \
990 (void *)(unsigned long)smp_processor_id()); \
991 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
992 (void *)(unsigned long)smp_processor_id()); \
993 register_cpu_notifier(&fn##_nb); \
994} while (0)
995
939#endif /* __KERNEL__ */ 996#endif /* __KERNEL__ */
940#endif /* _LINUX_PERF_EVENT_H */ 997#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1b177d29a7f0..193d4bfe42ff 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -2,7 +2,9 @@
2#define __LINUX_SERIAL_SCI_H 2#define __LINUX_SERIAL_SCI_H
3 3
4#include <linux/serial_core.h> 4#include <linux/serial_core.h>
5#ifdef CONFIG_SERIAL_SH_SCI_DMA
5#include <asm/dmaengine.h> 6#include <asm/dmaengine.h>
7#endif
6 8
7/* 9/*
8 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) 10 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
@@ -30,8 +32,10 @@ struct plat_sci_port {
30 upf_t flags; /* UPF_* flags */ 32 upf_t flags; /* UPF_* flags */
31 char *clk; /* clock string */ 33 char *clk; /* clock string */
32 struct device *dma_dev; 34 struct device *dma_dev;
35#ifdef CONFIG_SERIAL_SH_SCI_DMA
33 enum sh_dmae_slave_chan_id dma_slave_tx; 36 enum sh_dmae_slave_chan_id dma_slave_tx;
34 enum sh_dmae_slave_chan_id dma_slave_rx; 37 enum sh_dmae_slave_chan_id dma_slave_rx;
38#endif
35}; 39};
36 40
37#endif /* __LINUX_SERIAL_SCI_H */ 41#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index def10b064f29..cf42f194616e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -190,9 +190,6 @@ struct skb_shared_info {
190 atomic_t dataref; 190 atomic_t dataref;
191 unsigned short nr_frags; 191 unsigned short nr_frags;
192 unsigned short gso_size; 192 unsigned short gso_size;
193#ifdef CONFIG_HAS_DMA
194 dma_addr_t dma_head;
195#endif
196 /* Warning: this field is not always filled in (UFO)! */ 193 /* Warning: this field is not always filled in (UFO)! */
197 unsigned short gso_segs; 194 unsigned short gso_segs;
198 unsigned short gso_type; 195 unsigned short gso_type;
@@ -201,9 +198,6 @@ struct skb_shared_info {
201 struct sk_buff *frag_list; 198 struct sk_buff *frag_list;
202 struct skb_shared_hwtstamps hwtstamps; 199 struct skb_shared_hwtstamps hwtstamps;
203 skb_frag_t frags[MAX_SKB_FRAGS]; 200 skb_frag_t frags[MAX_SKB_FRAGS];
204#ifdef CONFIG_HAS_DMA
205 dma_addr_t dma_maps[MAX_SKB_FRAGS];
206#endif
207 /* Intermediate layers must ensure that destructor_arg 201 /* Intermediate layers must ensure that destructor_arg
208 * remains valid until skb destructor */ 202 * remains valid until skb destructor */
209 void * destructor_arg; 203 void * destructor_arg;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 960659bd8f78..032a19eb61b1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -257,6 +257,7 @@ struct ucred {
257#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ 257#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */
258#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ 258#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
259#define MSG_MORE 0x8000 /* Sender will send more */ 259#define MSG_MORE 0x8000 /* Sender will send more */
260#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
260 261
261#define MSG_EOF MSG_FIN 262#define MSG_EOF MSG_FIN
262 263
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
new file mode 100644
index 000000000000..aee1dbda4edc
--- /dev/null
+++ b/include/linux/spi/l4f00242t03.h
@@ -0,0 +1,31 @@
1/*
2 * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD
3 *
4 * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
5 * Based on Marek Vasut work in lms283gf05.h
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/
20
21#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
22#define _INCLUDE_LINUX_SPI_L4F00242T03_H_
23
24struct l4f00242t03_pdata {
25 unsigned int reset_gpio;
26 unsigned int data_enable_gpio;
27 const char *io_supply; /* will be set to 1.8 V */
28 const char *core_supply; /* will be set to 2.8 V */
29};
30
31#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 44f2ad0e8825..057929b0a651 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -105,18 +105,18 @@ struct perf_event_attr;
105 105
106#ifdef CONFIG_PERF_EVENTS 106#ifdef CONFIG_PERF_EVENTS
107 107
108#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ 108#define TRACE_SYS_ENTER_PERF_INIT(sname) \
109 .profile_enable = prof_sysenter_enable, \ 109 .perf_event_enable = perf_sysenter_enable, \
110 .profile_disable = prof_sysenter_disable, 110 .perf_event_disable = perf_sysenter_disable,
111 111
112#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ 112#define TRACE_SYS_EXIT_PERF_INIT(sname) \
113 .profile_enable = prof_sysexit_enable, \ 113 .perf_event_enable = perf_sysexit_enable, \
114 .profile_disable = prof_sysexit_disable, 114 .perf_event_disable = perf_sysexit_disable,
115#else 115#else
116#define TRACE_SYS_ENTER_PROFILE(sname) 116#define TRACE_SYS_ENTER_PERF(sname)
117#define TRACE_SYS_ENTER_PROFILE_INIT(sname) 117#define TRACE_SYS_ENTER_PERF_INIT(sname)
118#define TRACE_SYS_EXIT_PROFILE(sname) 118#define TRACE_SYS_EXIT_PERF(sname)
119#define TRACE_SYS_EXIT_PROFILE_INIT(sname) 119#define TRACE_SYS_EXIT_PERF_INIT(sname)
120#endif /* CONFIG_PERF_EVENTS */ 120#endif /* CONFIG_PERF_EVENTS */
121 121
122#ifdef CONFIG_FTRACE_SYSCALLS 122#ifdef CONFIG_FTRACE_SYSCALLS
@@ -153,7 +153,7 @@ struct perf_event_attr;
153 .regfunc = reg_event_syscall_enter, \ 153 .regfunc = reg_event_syscall_enter, \
154 .unregfunc = unreg_event_syscall_enter, \ 154 .unregfunc = unreg_event_syscall_enter, \
155 .data = (void *)&__syscall_meta_##sname,\ 155 .data = (void *)&__syscall_meta_##sname,\
156 TRACE_SYS_ENTER_PROFILE_INIT(sname) \ 156 TRACE_SYS_ENTER_PERF_INIT(sname) \
157 } 157 }
158 158
159#define SYSCALL_TRACE_EXIT_EVENT(sname) \ 159#define SYSCALL_TRACE_EXIT_EVENT(sname) \
@@ -175,7 +175,7 @@ struct perf_event_attr;
175 .regfunc = reg_event_syscall_exit, \ 175 .regfunc = reg_event_syscall_exit, \
176 .unregfunc = unreg_event_syscall_exit, \ 176 .unregfunc = unreg_event_syscall_exit, \
177 .data = (void *)&__syscall_meta_##sname,\ 177 .data = (void *)&__syscall_meta_##sname,\
178 TRACE_SYS_EXIT_PROFILE_INIT(sname) \ 178 TRACE_SYS_EXIT_PERF_INIT(sname) \
179 } 179 }
180 180
181#define SYSCALL_METADATA(sname, nb) \ 181#define SYSCALL_METADATA(sname, nb) \
@@ -688,7 +688,7 @@ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
688asmlinkage long sys_shmget(key_t key, size_t size, int flag); 688asmlinkage long sys_shmget(key_t key, size_t size, int flag);
689asmlinkage long sys_shmdt(char __user *shmaddr); 689asmlinkage long sys_shmdt(char __user *shmaddr);
690asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); 690asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
691asmlinkage long sys_ipc(unsigned int call, int first, int second, 691asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
692 unsigned long third, void __user *ptr, long fifth); 692 unsigned long third, void __user *ptr, long fifth);
693 693
694asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr); 694asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 71c7e9c96b23..bb44fa9ae135 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -70,12 +70,13 @@ struct tty_buffer {
70 70
71/* 71/*
72 * We default to dicing tty buffer allocations to this many characters 72 * We default to dicing tty buffer allocations to this many characters
73 * in order to avoid multiple page allocations. We assume tty_buffer itself 73 * in order to avoid multiple page allocations. We know the size of
74 * is under 256 bytes. See tty_buffer_find for the allocation logic this 74 * tty_buffer itself but it must also be taken into account that the
75 * must match 75 * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
76 * logic this must match
76 */ 77 */
77 78
78#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2) 79#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
79 80
80 81
81struct tty_bufhead { 82struct tty_bufhead {
@@ -223,6 +224,7 @@ struct tty_port {
223 wait_queue_head_t close_wait; /* Close waiters */ 224 wait_queue_head_t close_wait; /* Close waiters */
224 wait_queue_head_t delta_msr_wait; /* Modem status change */ 225 wait_queue_head_t delta_msr_wait; /* Modem status change */
225 unsigned long flags; /* TTY flags ASY_*/ 226 unsigned long flags; /* TTY flags ASY_*/
227 unsigned char console:1; /* port is a console */
226 struct mutex mutex; /* Locking */ 228 struct mutex mutex; /* Locking */
227 struct mutex buf_mutex; /* Buffer alloc lock */ 229 struct mutex buf_mutex; /* Buffer alloc lock */
228 unsigned char *xmit_buf; /* Optional buffer */ 230 unsigned char *xmit_buf; /* Optional buffer */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 8c9f053111bb..ce1323c4e47c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1055,7 +1055,8 @@ typedef void (*usb_complete_t)(struct urb *);
1055 * @number_of_packets: Lists the number of ISO transfer buffers. 1055 * @number_of_packets: Lists the number of ISO transfer buffers.
1056 * @interval: Specifies the polling interval for interrupt or isochronous 1056 * @interval: Specifies the polling interval for interrupt or isochronous
1057 * transfers. The units are frames (milliseconds) for full and low 1057 * transfers. The units are frames (milliseconds) for full and low
1058 * speed devices, and microframes (1/8 millisecond) for highspeed ones. 1058 * speed devices, and microframes (1/8 millisecond) for highspeed
1059 * and SuperSpeed devices.
1059 * @error_count: Returns the number of ISO transfers that reported errors. 1060 * @error_count: Returns the number of ISO transfers that reported errors.
1060 * @context: For use in completion functions. This normally points to 1061 * @context: For use in completion functions. This normally points to
1061 * request-specific driver context. 1062 * request-specific driver context.
@@ -1286,9 +1287,16 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
1286 * 1287 *
1287 * Initializes a interrupt urb with the proper information needed to submit 1288 * Initializes a interrupt urb with the proper information needed to submit
1288 * it to a device. 1289 * it to a device.
1289 * Note that high speed interrupt endpoints use a logarithmic encoding of 1290 *
1290 * the endpoint interval, and express polling intervals in microframes 1291 * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic
1291 * (eight per millisecond) rather than in frames (one per millisecond). 1292 * encoding of the endpoint interval, and express polling intervals in
1293 * microframes (eight per millisecond) rather than in frames (one per
1294 * millisecond).
1295 *
1296 * Wireless USB also uses the logarithmic encoding, but specifies it in units of
1297 * 128us instead of 125us. For Wireless USB devices, the interval is passed
1298 * through to the host controller, rather than being translated into microframe
1299 * units.
1292 */ 1300 */
1293static inline void usb_fill_int_urb(struct urb *urb, 1301static inline void usb_fill_int_urb(struct urb *urb,
1294 struct usb_device *dev, 1302 struct usb_device *dev,
@@ -1305,7 +1313,7 @@ static inline void usb_fill_int_urb(struct urb *urb,
1305 urb->transfer_buffer_length = buffer_length; 1313 urb->transfer_buffer_length = buffer_length;
1306 urb->complete = complete_fn; 1314 urb->complete = complete_fn;
1307 urb->context = context; 1315 urb->context = context;
1308 if (dev->speed == USB_SPEED_HIGH) 1316 if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER)
1309 urb->interval = 1 << (interval - 1); 1317 urb->interval = 1 << (interval - 1);
1310 else 1318 else
1311 urb->interval = interval; 1319 urb->interval = interval;
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 778b7b2a47d4..d5dd0bc408fd 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -27,7 +27,7 @@ struct vt_mode {
27#define VT_SETMODE 0x5602 /* set mode of active vt */ 27#define VT_SETMODE 0x5602 /* set mode of active vt */
28#define VT_AUTO 0x00 /* auto vt switching */ 28#define VT_AUTO 0x00 /* auto vt switching */
29#define VT_PROCESS 0x01 /* process controls switching */ 29#define VT_PROCESS 0x01 /* process controls switching */
30#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */ 30#define VT_ACKACQ 0x02 /* acknowledge switch */
31 31
32struct vt_stat { 32struct vt_stat {
33 unsigned short v_active; /* active vt */ 33 unsigned short v_active; /* active vt */
@@ -38,7 +38,6 @@ struct vt_stat {
38#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ 38#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */
39 39
40#define VT_RELDISP 0x5605 /* release display */ 40#define VT_RELDISP 0x5605 /* release display */
41#define VT_ACKACQ 0x02 /* acknowledge switch */
42 41
43#define VT_ACTIVATE 0x5606 /* make vt active */ 42#define VT_ACTIVATE 0x5606 /* make vt active */
44#define VT_WAITACTIVE 0x5607 /* wait for vt active */ 43#define VT_WAITACTIVE 0x5607 /* wait for vt active */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 04a6908e38d2..ff77e8f882f1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -176,6 +176,6 @@ extern void hci_sock_cleanup(void);
176extern int bt_sysfs_init(void); 176extern int bt_sysfs_init(void);
177extern void bt_sysfs_cleanup(void); 177extern void bt_sysfs_cleanup(void);
178 178
179extern struct class *bt_class; 179extern struct dentry *bt_debugfs;
180 180
181#endif /* __BLUETOOTH_H */ 181#endif /* __BLUETOOTH_H */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index ff92b46f5153..ae5196aae1a5 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -338,7 +338,8 @@ struct iscsi_host {
338extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, 338extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth,
339 int reason); 339 int reason);
340extern int iscsi_eh_abort(struct scsi_cmnd *sc); 340extern int iscsi_eh_abort(struct scsi_cmnd *sc);
341extern int iscsi_eh_target_reset(struct scsi_cmnd *sc); 341extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
342extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
342extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); 343extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
343extern int iscsi_queuecommand(struct scsi_cmnd *sc, 344extern int iscsi_queuecommand(struct scsi_cmnd *sc,
344 void (*done)(struct scsi_cmnd *)); 345 void (*done)(struct scsi_cmnd *));
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 601ad7744247..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \
401#undef DEFINE_EVENT 401#undef DEFINE_EVENT
402#define DEFINE_EVENT(template, name, proto, args) \ 402#define DEFINE_EVENT(template, name, proto, args) \
403 \ 403 \
404static void ftrace_profile_##name(proto); \ 404static void perf_trace_##name(proto); \
405 \ 405 \
406static notrace int \ 406static notrace int \
407ftrace_profile_enable_##name(struct ftrace_event_call *unused) \ 407perf_trace_enable_##name(struct ftrace_event_call *unused) \
408{ \ 408{ \
409 return register_trace_##name(ftrace_profile_##name); \ 409 return register_trace_##name(perf_trace_##name); \
410} \ 410} \
411 \ 411 \
412static notrace void \ 412static notrace void \
413ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ 413perf_trace_disable_##name(struct ftrace_event_call *unused) \
414{ \ 414{ \
415 unregister_trace_##name(ftrace_profile_##name); \ 415 unregister_trace_##name(perf_trace_##name); \
416} 416}
417 417
418#undef DEFINE_EVENT_PRINT 418#undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
507 507
508#ifdef CONFIG_PERF_EVENTS 508#ifdef CONFIG_PERF_EVENTS
509 509
510#define _TRACE_PROFILE_INIT(call) \ 510#define _TRACE_PERF_INIT(call) \
511 .profile_enable = ftrace_profile_enable_##call, \ 511 .perf_event_enable = perf_trace_enable_##call, \
512 .profile_disable = ftrace_profile_disable_##call, 512 .perf_event_disable = perf_trace_disable_##call,
513 513
514#else 514#else
515#define _TRACE_PROFILE_INIT(call) 515#define _TRACE_PERF_INIT(call)
516#endif /* CONFIG_PERF_EVENTS */ 516#endif /* CONFIG_PERF_EVENTS */
517 517
518#undef __entry 518#undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
638 .unregfunc = ftrace_raw_unreg_event_##call, \ 638 .unregfunc = ftrace_raw_unreg_event_##call, \
639 .print_fmt = print_fmt_##template, \ 639 .print_fmt = print_fmt_##template, \
640 .define_fields = ftrace_define_fields_##template, \ 640 .define_fields = ftrace_define_fields_##template, \
641 _TRACE_PROFILE_INIT(call) \ 641 _TRACE_PERF_INIT(call) \
642} 642}
643 643
644#undef DEFINE_EVENT_PRINT 644#undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
657 .unregfunc = ftrace_raw_unreg_event_##call, \ 657 .unregfunc = ftrace_raw_unreg_event_##call, \
658 .print_fmt = print_fmt_##call, \ 658 .print_fmt = print_fmt_##call, \
659 .define_fields = ftrace_define_fields_##template, \ 659 .define_fields = ftrace_define_fields_##template, \
660 _TRACE_PROFILE_INIT(call) \ 660 _TRACE_PERF_INIT(call) \
661} 661}
662 662
663#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 663#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
664 664
665/* 665/*
666 * Define the insertion callback to profile events 666 * Define the insertion callback to perf events
667 * 667 *
668 * The job is very similar to ftrace_raw_event_<call> except that we don't 668 * The job is very similar to ftrace_raw_event_<call> except that we don't
669 * insert in the ring buffer but in a perf counter. 669 * insert in the ring buffer but in a perf counter.
670 * 670 *
671 * static void ftrace_profile_<call>(proto) 671 * static void ftrace_perf_<call>(proto)
672 * { 672 * {
673 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 673 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
674 * struct ftrace_event_call *event_call = &event_<call>; 674 * struct ftrace_event_call *event_call = &event_<call>;
@@ -757,13 +757,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
757#undef DECLARE_EVENT_CLASS 757#undef DECLARE_EVENT_CLASS
758#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 758#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
759static notrace void \ 759static notrace void \
760ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ 760perf_trace_templ_##call(struct ftrace_event_call *event_call, \
761 proto) \ 761 proto) \
762{ \ 762{ \
763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
764 struct ftrace_raw_##call *entry; \ 764 struct ftrace_raw_##call *entry; \
765 u64 __addr = 0, __count = 1; \ 765 u64 __addr = 0, __count = 1; \
766 unsigned long irq_flags; \ 766 unsigned long irq_flags; \
767 struct pt_regs *__regs; \
767 int __entry_size; \ 768 int __entry_size; \
768 int __data_size; \ 769 int __data_size; \
769 int rctx; \ 770 int rctx; \
@@ -773,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
773 sizeof(u64)); \ 774 sizeof(u64)); \
774 __entry_size -= sizeof(u32); \ 775 __entry_size -= sizeof(u32); \
775 \ 776 \
776 if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ 777 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
777 "profile buffer not large enough")) \ 778 "profile buffer not large enough")) \
778 return; \ 779 return; \
779 entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ 780 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
780 __entry_size, event_call->id, &rctx, &irq_flags); \ 781 __entry_size, event_call->id, &rctx, &irq_flags); \
781 if (!entry) \ 782 if (!entry) \
782 return; \ 783 return; \
@@ -784,17 +785,20 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
784 \ 785 \
785 { assign; } \ 786 { assign; } \
786 \ 787 \
787 ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ 788 __regs = &__get_cpu_var(perf_trace_regs); \
788 __count, irq_flags); \ 789 perf_fetch_caller_regs(__regs, 2); \
790 \
791 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
792 __count, irq_flags, __regs); \
789} 793}
790 794
791#undef DEFINE_EVENT 795#undef DEFINE_EVENT
792#define DEFINE_EVENT(template, call, proto, args) \ 796#define DEFINE_EVENT(template, call, proto, args) \
793static notrace void ftrace_profile_##call(proto) \ 797static notrace void perf_trace_##call(proto) \
794{ \ 798{ \
795 struct ftrace_event_call *event_call = &event_##call; \ 799 struct ftrace_event_call *event_call = &event_##call; \
796 \ 800 \
797 ftrace_profile_templ_##template(event_call, args); \ 801 perf_trace_templ_##template(event_call, args); \
798} 802}
799 803
800#undef DEFINE_EVENT_PRINT 804#undef DEFINE_EVENT_PRINT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
47#endif 47#endif
48 48
49#ifdef CONFIG_PERF_EVENTS 49#ifdef CONFIG_PERF_EVENTS
50int prof_sysenter_enable(struct ftrace_event_call *call); 50int perf_sysenter_enable(struct ftrace_event_call *call);
51void prof_sysenter_disable(struct ftrace_event_call *call); 51void perf_sysenter_disable(struct ftrace_event_call *call);
52int prof_sysexit_enable(struct ftrace_event_call *call); 52int perf_sysexit_enable(struct ftrace_event_call *call);
53void prof_sysexit_disable(struct ftrace_event_call *call); 53void perf_sysexit_disable(struct ftrace_event_call *call);
54#endif 54#endif
55 55
56#endif /* _TRACE_SYSCALL_H */ 56#endif /* _TRACE_SYSCALL_H */
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 355a3da9ec73..1d6f53f6b562 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -13,7 +13,7 @@
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15 15
16SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second, 16SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
17 unsigned long, third, void __user *, ptr, long, fifth) 17 unsigned long, third, void __user *, ptr, long, fifth)
18{ 18{
19 int version, ret; 19 int version, ret;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fa034d29cf73..0ed46f3e51e9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
259 struct kprobe_insn_page *kip; 259 struct kprobe_insn_page *kip;
260 260
261 list_for_each_entry(kip, &c->pages, list) { 261 list_for_each_entry(kip, &c->pages, list) {
262 long idx = ((long)slot - (long)kip->insns) / c->insn_size; 262 long idx = ((long)slot - (long)kip->insns) /
263 (c->insn_size * sizeof(kprobe_opcode_t));
263 if (idx >= 0 && idx < slots_per_page(c)) { 264 if (idx >= 0 && idx < slots_per_page(c)) {
264 WARN_ON(kip->slot_used[idx] != SLOT_USED); 265 WARN_ON(kip->slot_used[idx] != SLOT_USED);
265 if (dirty) { 266 if (dirty) {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 681bc2e1e187..c927a549db2c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3211{ 3211{
3212 unsigned long flags; 3212 unsigned long flags;
3213 3213
3214 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3215
3216 if (unlikely(current->lockdep_recursion)) 3214 if (unlikely(current->lockdep_recursion))
3217 return; 3215 return;
3218 3216
@@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3220 check_flags(flags); 3218 check_flags(flags);
3221 3219
3222 current->lockdep_recursion = 1; 3220 current->lockdep_recursion = 1;
3221 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3223 __lock_acquire(lock, subclass, trylock, read, check, 3222 __lock_acquire(lock, subclass, trylock, read, check,
3224 irqs_disabled_flags(flags), nest_lock, ip, 0); 3223 irqs_disabled_flags(flags), nest_lock, ip, 0);
3225 current->lockdep_recursion = 0; 3224 current->lockdep_recursion = 0;
@@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested,
3232{ 3231{
3233 unsigned long flags; 3232 unsigned long flags;
3234 3233
3235 trace_lock_release(lock, nested, ip);
3236
3237 if (unlikely(current->lockdep_recursion)) 3234 if (unlikely(current->lockdep_recursion))
3238 return; 3235 return;
3239 3236
3240 raw_local_irq_save(flags); 3237 raw_local_irq_save(flags);
3241 check_flags(flags); 3238 check_flags(flags);
3242 current->lockdep_recursion = 1; 3239 current->lockdep_recursion = 1;
3240 trace_lock_release(lock, nested, ip);
3243 __lock_release(lock, nested, ip); 3241 __lock_release(lock, nested, ip);
3244 current->lockdep_recursion = 0; 3242 current->lockdep_recursion = 0;
3245 raw_local_irq_restore(flags); 3243 raw_local_irq_restore(flags);
@@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
3413{ 3411{
3414 unsigned long flags; 3412 unsigned long flags;
3415 3413
3416 trace_lock_contended(lock, ip);
3417
3418 if (unlikely(!lock_stat)) 3414 if (unlikely(!lock_stat))
3419 return; 3415 return;
3420 3416
@@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
3424 raw_local_irq_save(flags); 3420 raw_local_irq_save(flags);
3425 check_flags(flags); 3421 check_flags(flags);
3426 current->lockdep_recursion = 1; 3422 current->lockdep_recursion = 1;
3423 trace_lock_contended(lock, ip);
3427 __lock_contended(lock, ip); 3424 __lock_contended(lock, ip);
3428 current->lockdep_recursion = 0; 3425 current->lockdep_recursion = 0;
3429 raw_local_irq_restore(flags); 3426 raw_local_irq_restore(flags);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 4393b9e73740..574ee58a3046 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
81void __weak hw_perf_disable(void) { barrier(); } 81void __weak hw_perf_disable(void) { barrier(); }
82void __weak hw_perf_enable(void) { barrier(); } 82void __weak hw_perf_enable(void) { barrier(); }
83 83
84void __weak hw_perf_event_setup(int cpu) { barrier(); }
85void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
86void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
87
88int __weak 84int __weak
89hw_perf_group_sched_in(struct perf_event *group_leader, 85hw_perf_group_sched_in(struct perf_event *group_leader,
90 struct perf_cpu_context *cpuctx, 86 struct perf_cpu_context *cpuctx,
@@ -97,25 +93,15 @@ void __weak perf_event_print_debug(void) { }
97 93
98static DEFINE_PER_CPU(int, perf_disable_count); 94static DEFINE_PER_CPU(int, perf_disable_count);
99 95
100void __perf_disable(void)
101{
102 __get_cpu_var(perf_disable_count)++;
103}
104
105bool __perf_enable(void)
106{
107 return !--__get_cpu_var(perf_disable_count);
108}
109
110void perf_disable(void) 96void perf_disable(void)
111{ 97{
112 __perf_disable(); 98 if (!__get_cpu_var(perf_disable_count)++)
113 hw_perf_disable(); 99 hw_perf_disable();
114} 100}
115 101
116void perf_enable(void) 102void perf_enable(void)
117{ 103{
118 if (__perf_enable()) 104 if (!--__get_cpu_var(perf_disable_count))
119 hw_perf_enable(); 105 hw_perf_enable();
120} 106}
121 107
@@ -1538,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1538 */ 1524 */
1539 if (interrupts == MAX_INTERRUPTS) { 1525 if (interrupts == MAX_INTERRUPTS) {
1540 perf_log_throttle(event, 1); 1526 perf_log_throttle(event, 1);
1527 perf_disable();
1541 event->pmu->unthrottle(event); 1528 event->pmu->unthrottle(event);
1529 perf_enable();
1542 } 1530 }
1543 1531
1544 if (!event->attr.freq || !event->attr.sample_freq) 1532 if (!event->attr.freq || !event->attr.sample_freq)
1545 continue; 1533 continue;
1546 1534
1535 perf_disable();
1547 event->pmu->read(event); 1536 event->pmu->read(event);
1548 now = atomic64_read(&event->count); 1537 now = atomic64_read(&event->count);
1549 delta = now - hwc->freq_count_stamp; 1538 delta = now - hwc->freq_count_stamp;
@@ -1551,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1551 1540
1552 if (delta > 0) 1541 if (delta > 0)
1553 perf_adjust_period(event, TICK_NSEC, delta); 1542 perf_adjust_period(event, TICK_NSEC, delta);
1543 perf_enable();
1554 } 1544 }
1555 raw_spin_unlock(&ctx->lock); 1545 raw_spin_unlock(&ctx->lock);
1556} 1546}
@@ -1560,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1560 */ 1550 */
1561static void rotate_ctx(struct perf_event_context *ctx) 1551static void rotate_ctx(struct perf_event_context *ctx)
1562{ 1552{
1563 if (!ctx->nr_events)
1564 return;
1565
1566 raw_spin_lock(&ctx->lock); 1553 raw_spin_lock(&ctx->lock);
1567 1554
1568 /* Rotate the first entry last of non-pinned groups */ 1555 /* Rotate the first entry last of non-pinned groups */
@@ -1575,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
1575{ 1562{
1576 struct perf_cpu_context *cpuctx; 1563 struct perf_cpu_context *cpuctx;
1577 struct perf_event_context *ctx; 1564 struct perf_event_context *ctx;
1565 int rotate = 0;
1578 1566
1579 if (!atomic_read(&nr_events)) 1567 if (!atomic_read(&nr_events))
1580 return; 1568 return;
1581 1569
1582 cpuctx = &__get_cpu_var(perf_cpu_context); 1570 cpuctx = &__get_cpu_var(perf_cpu_context);
1583 ctx = curr->perf_event_ctxp; 1571 if (cpuctx->ctx.nr_events &&
1572 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1573 rotate = 1;
1584 1574
1585 perf_disable(); 1575 ctx = curr->perf_event_ctxp;
1576 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1577 rotate = 1;
1586 1578
1587 perf_ctx_adjust_freq(&cpuctx->ctx); 1579 perf_ctx_adjust_freq(&cpuctx->ctx);
1588 if (ctx) 1580 if (ctx)
1589 perf_ctx_adjust_freq(ctx); 1581 perf_ctx_adjust_freq(ctx);
1590 1582
1583 if (!rotate)
1584 return;
1585
1586 perf_disable();
1591 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 1587 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1592 if (ctx) 1588 if (ctx)
1593 task_ctx_sched_out(ctx, EVENT_FLEXIBLE); 1589 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1599,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
1599 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); 1595 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1600 if (ctx) 1596 if (ctx)
1601 task_ctx_sched_in(curr, EVENT_FLEXIBLE); 1597 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1602
1603 perf_enable(); 1598 perf_enable();
1604} 1599}
1605 1600
@@ -2791,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2791 return NULL; 2786 return NULL;
2792} 2787}
2793 2788
2789#ifdef CONFIG_EVENT_TRACING
2790__weak
2791void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2792{
2793}
2794#endif
2795
2794/* 2796/*
2795 * Output 2797 * Output
2796 */ 2798 */
@@ -4318,9 +4320,8 @@ static const struct pmu perf_ops_task_clock = {
4318#ifdef CONFIG_EVENT_TRACING 4320#ifdef CONFIG_EVENT_TRACING
4319 4321
4320void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 4322void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4321 int entry_size) 4323 int entry_size, struct pt_regs *regs)
4322{ 4324{
4323 struct pt_regs *regs = get_irq_regs();
4324 struct perf_sample_data data; 4325 struct perf_sample_data data;
4325 struct perf_raw_record raw = { 4326 struct perf_raw_record raw = {
4326 .size = entry_size, 4327 .size = entry_size,
@@ -4330,12 +4331,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4330 perf_sample_data_init(&data, addr); 4331 perf_sample_data_init(&data, addr);
4331 data.raw = &raw; 4332 data.raw = &raw;
4332 4333
4333 if (!regs)
4334 regs = task_pt_regs(current);
4335
4336 /* Trace events already protected against recursion */ 4334 /* Trace events already protected against recursion */
4337 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 4335 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
4338 &data, regs); 4336 &data, regs);
4339} 4337}
4340EXPORT_SYMBOL_GPL(perf_tp_event); 4338EXPORT_SYMBOL_GPL(perf_tp_event);
4341 4339
@@ -4351,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event,
4351 4349
4352static void tp_perf_event_destroy(struct perf_event *event) 4350static void tp_perf_event_destroy(struct perf_event *event)
4353{ 4351{
4354 ftrace_profile_disable(event->attr.config); 4352 perf_trace_disable(event->attr.config);
4355} 4353}
4356 4354
4357static const struct pmu *tp_perf_event_init(struct perf_event *event) 4355static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4365,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
4365 !capable(CAP_SYS_ADMIN)) 4363 !capable(CAP_SYS_ADMIN))
4366 return ERR_PTR(-EPERM); 4364 return ERR_PTR(-EPERM);
4367 4365
4368 if (ftrace_profile_enable(event->attr.config)) 4366 if (perf_trace_enable(event->attr.config))
4369 return NULL; 4367 return NULL;
4370 4368
4371 event->destroy = tp_perf_event_destroy; 4369 event->destroy = tp_perf_event_destroy;
@@ -5372,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child)
5372 return ret; 5370 return ret;
5373} 5371}
5374 5372
5373static void __init perf_event_init_all_cpus(void)
5374{
5375 int cpu;
5376 struct perf_cpu_context *cpuctx;
5377
5378 for_each_possible_cpu(cpu) {
5379 cpuctx = &per_cpu(perf_cpu_context, cpu);
5380 __perf_event_init_context(&cpuctx->ctx, NULL);
5381 }
5382}
5383
5375static void __cpuinit perf_event_init_cpu(int cpu) 5384static void __cpuinit perf_event_init_cpu(int cpu)
5376{ 5385{
5377 struct perf_cpu_context *cpuctx; 5386 struct perf_cpu_context *cpuctx;
5378 5387
5379 cpuctx = &per_cpu(perf_cpu_context, cpu); 5388 cpuctx = &per_cpu(perf_cpu_context, cpu);
5380 __perf_event_init_context(&cpuctx->ctx, NULL);
5381 5389
5382 spin_lock(&perf_resource_lock); 5390 spin_lock(&perf_resource_lock);
5383 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; 5391 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5384 spin_unlock(&perf_resource_lock); 5392 spin_unlock(&perf_resource_lock);
5385
5386 hw_perf_event_setup(cpu);
5387} 5393}
5388 5394
5389#ifdef CONFIG_HOTPLUG_CPU 5395#ifdef CONFIG_HOTPLUG_CPU
@@ -5423,20 +5429,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5423 perf_event_init_cpu(cpu); 5429 perf_event_init_cpu(cpu);
5424 break; 5430 break;
5425 5431
5426 case CPU_ONLINE:
5427 case CPU_ONLINE_FROZEN:
5428 hw_perf_event_setup_online(cpu);
5429 break;
5430
5431 case CPU_DOWN_PREPARE: 5432 case CPU_DOWN_PREPARE:
5432 case CPU_DOWN_PREPARE_FROZEN: 5433 case CPU_DOWN_PREPARE_FROZEN:
5433 perf_event_exit_cpu(cpu); 5434 perf_event_exit_cpu(cpu);
5434 break; 5435 break;
5435 5436
5436 case CPU_DEAD:
5437 hw_perf_event_setup_offline(cpu);
5438 break;
5439
5440 default: 5437 default:
5441 break; 5438 break;
5442 } 5439 }
@@ -5454,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
5454 5451
5455void __init perf_event_init(void) 5452void __init perf_event_init(void)
5456{ 5453{
5454 perf_event_init_all_cpus();
5457 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 5455 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
5458 (void *)(long)smp_processor_id()); 5456 (void *)(long)smp_processor_id());
5459 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, 5457 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
52obj-$(CONFIG_EVENT_TRACING) += trace_export.o 52obj-$(CONFIG_EVENT_TRACING) += trace_export.o
53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o 53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
54ifeq ($(CONFIG_PERF_EVENTS),y) 54ifeq ($(CONFIG_PERF_EVENTS),y)
55obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o 55obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
56endif 56endif
57obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o 57obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
58obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o 58obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index c1cc3ab633de..81f691eb3a30 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,36 @@
1/* 1/*
2 * trace event based perf counter profiling 2 * trace event based perf event profiling/tracing
3 * 3 *
4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
5 * 5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/kprobes.h> 9#include <linux/kprobes.h>
10#include "trace.h" 10#include "trace.h"
11 11
12DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
13EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
14
15EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
12 16
13static char *perf_trace_buf; 17static char *perf_trace_buf;
14static char *perf_trace_buf_nmi; 18static char *perf_trace_buf_nmi;
15 19
16typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; 20typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
17 21
18/* Count the events in use (per event id, not per instance) */ 22/* Count the events in use (per event id, not per instance) */
19static int total_profile_count; 23static int total_ref_count;
20 24
21static int ftrace_profile_enable_event(struct ftrace_event_call *event) 25static int perf_trace_event_enable(struct ftrace_event_call *event)
22{ 26{
23 char *buf; 27 char *buf;
24 int ret = -ENOMEM; 28 int ret = -ENOMEM;
25 29
26 if (event->profile_count++ > 0) 30 if (event->perf_refcount++ > 0)
27 return 0; 31 return 0;
28 32
29 if (!total_profile_count) { 33 if (!total_ref_count) {
30 buf = (char *)alloc_percpu(perf_trace_t); 34 buf = (char *)alloc_percpu(perf_trace_t);
31 if (!buf) 35 if (!buf)
32 goto fail_buf; 36 goto fail_buf;
@@ -40,35 +44,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
40 rcu_assign_pointer(perf_trace_buf_nmi, buf); 44 rcu_assign_pointer(perf_trace_buf_nmi, buf);
41 } 45 }
42 46
43 ret = event->profile_enable(event); 47 ret = event->perf_event_enable(event);
44 if (!ret) { 48 if (!ret) {
45 total_profile_count++; 49 total_ref_count++;
46 return 0; 50 return 0;
47 } 51 }
48 52
49fail_buf_nmi: 53fail_buf_nmi:
50 if (!total_profile_count) { 54 if (!total_ref_count) {
51 free_percpu(perf_trace_buf_nmi); 55 free_percpu(perf_trace_buf_nmi);
52 free_percpu(perf_trace_buf); 56 free_percpu(perf_trace_buf);
53 perf_trace_buf_nmi = NULL; 57 perf_trace_buf_nmi = NULL;
54 perf_trace_buf = NULL; 58 perf_trace_buf = NULL;
55 } 59 }
56fail_buf: 60fail_buf:
57 event->profile_count--; 61 event->perf_refcount--;
58 62
59 return ret; 63 return ret;
60} 64}
61 65
62int ftrace_profile_enable(int event_id) 66int perf_trace_enable(int event_id)
63{ 67{
64 struct ftrace_event_call *event; 68 struct ftrace_event_call *event;
65 int ret = -EINVAL; 69 int ret = -EINVAL;
66 70
67 mutex_lock(&event_mutex); 71 mutex_lock(&event_mutex);
68 list_for_each_entry(event, &ftrace_events, list) { 72 list_for_each_entry(event, &ftrace_events, list) {
69 if (event->id == event_id && event->profile_enable && 73 if (event->id == event_id && event->perf_event_enable &&
70 try_module_get(event->mod)) { 74 try_module_get(event->mod)) {
71 ret = ftrace_profile_enable_event(event); 75 ret = perf_trace_event_enable(event);
72 break; 76 break;
73 } 77 }
74 } 78 }
@@ -77,16 +81,16 @@ int ftrace_profile_enable(int event_id)
77 return ret; 81 return ret;
78} 82}
79 83
80static void ftrace_profile_disable_event(struct ftrace_event_call *event) 84static void perf_trace_event_disable(struct ftrace_event_call *event)
81{ 85{
82 char *buf, *nmi_buf; 86 char *buf, *nmi_buf;
83 87
84 if (--event->profile_count > 0) 88 if (--event->perf_refcount > 0)
85 return; 89 return;
86 90
87 event->profile_disable(event); 91 event->perf_event_disable(event);
88 92
89 if (!--total_profile_count) { 93 if (!--total_ref_count) {
90 buf = perf_trace_buf; 94 buf = perf_trace_buf;
91 rcu_assign_pointer(perf_trace_buf, NULL); 95 rcu_assign_pointer(perf_trace_buf, NULL);
92 96
@@ -104,14 +108,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
104 } 108 }
105} 109}
106 110
107void ftrace_profile_disable(int event_id) 111void perf_trace_disable(int event_id)
108{ 112{
109 struct ftrace_event_call *event; 113 struct ftrace_event_call *event;
110 114
111 mutex_lock(&event_mutex); 115 mutex_lock(&event_mutex);
112 list_for_each_entry(event, &ftrace_events, list) { 116 list_for_each_entry(event, &ftrace_events, list) {
113 if (event->id == event_id) { 117 if (event->id == event_id) {
114 ftrace_profile_disable_event(event); 118 perf_trace_event_disable(event);
115 module_put(event->mod); 119 module_put(event->mod);
116 break; 120 break;
117 } 121 }
@@ -119,8 +123,8 @@ void ftrace_profile_disable(int event_id)
119 mutex_unlock(&event_mutex); 123 mutex_unlock(&event_mutex);
120} 124}
121 125
122__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, 126__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
123 int *rctxp, unsigned long *irq_flags) 127 int *rctxp, unsigned long *irq_flags)
124{ 128{
125 struct trace_entry *entry; 129 struct trace_entry *entry;
126 char *trace_buf, *raw_data; 130 char *trace_buf, *raw_data;
@@ -161,4 +165,4 @@ err_recursion:
161 local_irq_restore(*irq_flags); 165 local_irq_restore(*irq_flags);
162 return NULL; 166 return NULL;
163} 167}
164EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); 168EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..beab8bf2f310 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
938 trace_create_file("enable", 0644, call->dir, call, 938 trace_create_file("enable", 0644, call->dir, call,
939 enable); 939 enable);
940 940
941 if (call->id && call->profile_enable) 941 if (call->id && call->perf_event_enable)
942 trace_create_file("id", 0444, call->dir, call, 942 trace_create_file("id", 0444, call->dir, call,
943 id); 943 id);
944 944
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 505c92273b1a..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
1214#ifdef CONFIG_PERF_EVENTS 1214#ifdef CONFIG_PERF_EVENTS
1215 1215
1216/* Kprobe profile handler */ 1216/* Kprobe profile handler */
1217static __kprobes void kprobe_profile_func(struct kprobe *kp, 1217static __kprobes void kprobe_perf_func(struct kprobe *kp,
1218 struct pt_regs *regs) 1218 struct pt_regs *regs)
1219{ 1219{
1220 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1220 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
1227 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 1227 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1228 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1228 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1229 size -= sizeof(u32); 1229 size -= sizeof(u32);
1230 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1230 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1231 "profile buffer not large enough")) 1231 "profile buffer not large enough"))
1232 return; 1232 return;
1233 1233
1234 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); 1234 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1235 if (!entry) 1235 if (!entry)
1236 return; 1236 return;
1237 1237
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
1240 for (i = 0; i < tp->nr_args; i++) 1240 for (i = 0; i < tp->nr_args; i++)
1241 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1241 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1242 1242
1243 ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); 1243 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
1244} 1244}
1245 1245
1246/* Kretprobe profile handler */ 1246/* Kretprobe profile handler */
1247static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, 1247static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1248 struct pt_regs *regs) 1248 struct pt_regs *regs)
1249{ 1249{
1250 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1250 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
1257 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1257 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1258 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1258 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1259 size -= sizeof(u32); 1259 size -= sizeof(u32);
1260 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1260 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1261 "profile buffer not large enough")) 1261 "profile buffer not large enough"))
1262 return; 1262 return;
1263 1263
1264 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); 1264 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1265 if (!entry) 1265 if (!entry)
1266 return; 1266 return;
1267 1267
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
1271 for (i = 0; i < tp->nr_args; i++) 1271 for (i = 0; i < tp->nr_args; i++)
1272 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1272 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1273 1273
1274 ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); 1274 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
1275 irq_flags, regs);
1275} 1276}
1276 1277
1277static int probe_profile_enable(struct ftrace_event_call *call) 1278static int probe_perf_enable(struct ftrace_event_call *call)
1278{ 1279{
1279 struct trace_probe *tp = (struct trace_probe *)call->data; 1280 struct trace_probe *tp = (struct trace_probe *)call->data;
1280 1281
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
1286 return enable_kprobe(&tp->rp.kp); 1287 return enable_kprobe(&tp->rp.kp);
1287} 1288}
1288 1289
1289static void probe_profile_disable(struct ftrace_event_call *call) 1290static void probe_perf_disable(struct ftrace_event_call *call)
1290{ 1291{
1291 struct trace_probe *tp = (struct trace_probe *)call->data; 1292 struct trace_probe *tp = (struct trace_probe *)call->data;
1292 1293
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1311 kprobe_trace_func(kp, regs); 1312 kprobe_trace_func(kp, regs);
1312#ifdef CONFIG_PERF_EVENTS 1313#ifdef CONFIG_PERF_EVENTS
1313 if (tp->flags & TP_FLAG_PROFILE) 1314 if (tp->flags & TP_FLAG_PROFILE)
1314 kprobe_profile_func(kp, regs); 1315 kprobe_perf_func(kp, regs);
1315#endif 1316#endif
1316 return 0; /* We don't tweek kernel, so just return 0 */ 1317 return 0; /* We don't tweek kernel, so just return 0 */
1317} 1318}
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1325 kretprobe_trace_func(ri, regs); 1326 kretprobe_trace_func(ri, regs);
1326#ifdef CONFIG_PERF_EVENTS 1327#ifdef CONFIG_PERF_EVENTS
1327 if (tp->flags & TP_FLAG_PROFILE) 1328 if (tp->flags & TP_FLAG_PROFILE)
1328 kretprobe_profile_func(ri, regs); 1329 kretprobe_perf_func(ri, regs);
1329#endif 1330#endif
1330 return 0; /* We don't tweek kernel, so just return 0 */ 1331 return 0; /* We don't tweek kernel, so just return 0 */
1331} 1332}
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
1358 call->unregfunc = probe_event_disable; 1359 call->unregfunc = probe_event_disable;
1359 1360
1360#ifdef CONFIG_PERF_EVENTS 1361#ifdef CONFIG_PERF_EVENTS
1361 call->profile_enable = probe_profile_enable; 1362 call->perf_event_enable = probe_perf_enable;
1362 call->profile_disable = probe_profile_disable; 1363 call->perf_event_disable = probe_perf_disable;
1363#endif 1364#endif
1364 call->data = tp; 1365 call->data = tp;
1365 ret = trace_add_event_call(call); 1366 ret = trace_add_event_call(call);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..33c2a5b769dc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls);
428 428
429#ifdef CONFIG_PERF_EVENTS 429#ifdef CONFIG_PERF_EVENTS
430 430
431static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); 431static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
432static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); 432static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
433static int sys_prof_refcount_enter; 433static int sys_perf_refcount_enter;
434static int sys_prof_refcount_exit; 434static int sys_perf_refcount_exit;
435 435
436static void prof_syscall_enter(struct pt_regs *regs, long id) 436static void perf_syscall_enter(struct pt_regs *regs, long id)
437{ 437{
438 struct syscall_metadata *sys_data; 438 struct syscall_metadata *sys_data;
439 struct syscall_trace_enter *rec; 439 struct syscall_trace_enter *rec;
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
443 int size; 443 int size;
444 444
445 syscall_nr = syscall_get_nr(current, regs); 445 syscall_nr = syscall_get_nr(current, regs);
446 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) 446 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
447 return; 447 return;
448 448
449 sys_data = syscall_nr_to_meta(syscall_nr); 449 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
455 size = ALIGN(size + sizeof(u32), sizeof(u64)); 455 size = ALIGN(size + sizeof(u32), sizeof(u64));
456 size -= sizeof(u32); 456 size -= sizeof(u32);
457 457
458 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 458 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
459 "profile buffer not large enough")) 459 "perf buffer not large enough"))
460 return; 460 return;
461 461
462 rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, 462 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
463 sys_data->enter_event->id, &rctx, &flags); 463 sys_data->enter_event->id, &rctx, &flags);
464 if (!rec) 464 if (!rec)
465 return; 465 return;
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
467 rec->nr = syscall_nr; 467 rec->nr = syscall_nr;
468 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 468 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
469 (unsigned long *)&rec->args); 469 (unsigned long *)&rec->args);
470 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); 470 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
471} 471}
472 472
473int prof_sysenter_enable(struct ftrace_event_call *call) 473int perf_sysenter_enable(struct ftrace_event_call *call)
474{ 474{
475 int ret = 0; 475 int ret = 0;
476 int num; 476 int num;
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
478 num = ((struct syscall_metadata *)call->data)->syscall_nr; 478 num = ((struct syscall_metadata *)call->data)->syscall_nr;
479 479
480 mutex_lock(&syscall_trace_lock); 480 mutex_lock(&syscall_trace_lock);
481 if (!sys_prof_refcount_enter) 481 if (!sys_perf_refcount_enter)
482 ret = register_trace_sys_enter(prof_syscall_enter); 482 ret = register_trace_sys_enter(perf_syscall_enter);
483 if (ret) { 483 if (ret) {
484 pr_info("event trace: Could not activate" 484 pr_info("event trace: Could not activate"
485 "syscall entry trace point"); 485 "syscall entry trace point");
486 } else { 486 } else {
487 set_bit(num, enabled_prof_enter_syscalls); 487 set_bit(num, enabled_perf_enter_syscalls);
488 sys_prof_refcount_enter++; 488 sys_perf_refcount_enter++;
489 } 489 }
490 mutex_unlock(&syscall_trace_lock); 490 mutex_unlock(&syscall_trace_lock);
491 return ret; 491 return ret;
492} 492}
493 493
494void prof_sysenter_disable(struct ftrace_event_call *call) 494void perf_sysenter_disable(struct ftrace_event_call *call)
495{ 495{
496 int num; 496 int num;
497 497
498 num = ((struct syscall_metadata *)call->data)->syscall_nr; 498 num = ((struct syscall_metadata *)call->data)->syscall_nr;
499 499
500 mutex_lock(&syscall_trace_lock); 500 mutex_lock(&syscall_trace_lock);
501 sys_prof_refcount_enter--; 501 sys_perf_refcount_enter--;
502 clear_bit(num, enabled_prof_enter_syscalls); 502 clear_bit(num, enabled_perf_enter_syscalls);
503 if (!sys_prof_refcount_enter) 503 if (!sys_perf_refcount_enter)
504 unregister_trace_sys_enter(prof_syscall_enter); 504 unregister_trace_sys_enter(perf_syscall_enter);
505 mutex_unlock(&syscall_trace_lock); 505 mutex_unlock(&syscall_trace_lock);
506} 506}
507 507
508static void prof_syscall_exit(struct pt_regs *regs, long ret) 508static void perf_syscall_exit(struct pt_regs *regs, long ret)
509{ 509{
510 struct syscall_metadata *sys_data; 510 struct syscall_metadata *sys_data;
511 struct syscall_trace_exit *rec; 511 struct syscall_trace_exit *rec;
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
515 int size; 515 int size;
516 516
517 syscall_nr = syscall_get_nr(current, regs); 517 syscall_nr = syscall_get_nr(current, regs);
518 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) 518 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
519 return; 519 return;
520 520
521 sys_data = syscall_nr_to_meta(syscall_nr); 521 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
530 * Impossible, but be paranoid with the future 530 * Impossible, but be paranoid with the future
531 * How to put this check outside runtime? 531 * How to put this check outside runtime?
532 */ 532 */
533 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 533 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
534 "exit event has grown above profile buffer size")) 534 "exit event has grown above perf buffer size"))
535 return; 535 return;
536 536
537 rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, 537 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
538 sys_data->exit_event->id, &rctx, &flags); 538 sys_data->exit_event->id, &rctx, &flags);
539 if (!rec) 539 if (!rec)
540 return; 540 return;
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
542 rec->nr = syscall_nr; 542 rec->nr = syscall_nr;
543 rec->ret = syscall_get_return_value(current, regs); 543 rec->ret = syscall_get_return_value(current, regs);
544 544
545 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); 545 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
546} 546}
547 547
548int prof_sysexit_enable(struct ftrace_event_call *call) 548int perf_sysexit_enable(struct ftrace_event_call *call)
549{ 549{
550 int ret = 0; 550 int ret = 0;
551 int num; 551 int num;
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
553 num = ((struct syscall_metadata *)call->data)->syscall_nr; 553 num = ((struct syscall_metadata *)call->data)->syscall_nr;
554 554
555 mutex_lock(&syscall_trace_lock); 555 mutex_lock(&syscall_trace_lock);
556 if (!sys_prof_refcount_exit) 556 if (!sys_perf_refcount_exit)
557 ret = register_trace_sys_exit(prof_syscall_exit); 557 ret = register_trace_sys_exit(perf_syscall_exit);
558 if (ret) { 558 if (ret) {
559 pr_info("event trace: Could not activate" 559 pr_info("event trace: Could not activate"
560 "syscall exit trace point"); 560 "syscall exit trace point");
561 } else { 561 } else {
562 set_bit(num, enabled_prof_exit_syscalls); 562 set_bit(num, enabled_perf_exit_syscalls);
563 sys_prof_refcount_exit++; 563 sys_perf_refcount_exit++;
564 } 564 }
565 mutex_unlock(&syscall_trace_lock); 565 mutex_unlock(&syscall_trace_lock);
566 return ret; 566 return ret;
567} 567}
568 568
569void prof_sysexit_disable(struct ftrace_event_call *call) 569void perf_sysexit_disable(struct ftrace_event_call *call)
570{ 570{
571 int num; 571 int num;
572 572
573 num = ((struct syscall_metadata *)call->data)->syscall_nr; 573 num = ((struct syscall_metadata *)call->data)->syscall_nr;
574 574
575 mutex_lock(&syscall_trace_lock); 575 mutex_lock(&syscall_trace_lock);
576 sys_prof_refcount_exit--; 576 sys_perf_refcount_exit--;
577 clear_bit(num, enabled_prof_exit_syscalls); 577 clear_bit(num, enabled_perf_exit_syscalls);
578 if (!sys_prof_refcount_exit) 578 if (!sys_perf_refcount_exit)
579 unregister_trace_sys_exit(prof_syscall_exit); 579 unregister_trace_sys_exit(perf_syscall_exit);
580 mutex_unlock(&syscall_trace_lock); 580 mutex_unlock(&syscall_trace_lock);
581} 581}
582 582
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd88539a0e6..6c0081441a32 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -284,6 +284,7 @@ static DEFINE_MUTEX(swap_cgroup_mutex);
284struct swap_cgroup_ctrl { 284struct swap_cgroup_ctrl {
285 struct page **map; 285 struct page **map;
286 unsigned long length; 286 unsigned long length;
287 spinlock_t lock;
287}; 288};
288 289
289struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; 290struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
@@ -353,16 +354,22 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
353 struct swap_cgroup_ctrl *ctrl; 354 struct swap_cgroup_ctrl *ctrl;
354 struct page *mappage; 355 struct page *mappage;
355 struct swap_cgroup *sc; 356 struct swap_cgroup *sc;
357 unsigned long flags;
358 unsigned short retval;
356 359
357 ctrl = &swap_cgroup_ctrl[type]; 360 ctrl = &swap_cgroup_ctrl[type];
358 361
359 mappage = ctrl->map[idx]; 362 mappage = ctrl->map[idx];
360 sc = page_address(mappage); 363 sc = page_address(mappage);
361 sc += pos; 364 sc += pos;
362 if (cmpxchg(&sc->id, old, new) == old) 365 spin_lock_irqsave(&ctrl->lock, flags);
363 return old; 366 retval = sc->id;
367 if (retval == old)
368 sc->id = new;
364 else 369 else
365 return 0; 370 retval = 0;
371 spin_unlock_irqrestore(&ctrl->lock, flags);
372 return retval;
366} 373}
367 374
368/** 375/**
@@ -383,13 +390,17 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
383 struct page *mappage; 390 struct page *mappage;
384 struct swap_cgroup *sc; 391 struct swap_cgroup *sc;
385 unsigned short old; 392 unsigned short old;
393 unsigned long flags;
386 394
387 ctrl = &swap_cgroup_ctrl[type]; 395 ctrl = &swap_cgroup_ctrl[type];
388 396
389 mappage = ctrl->map[idx]; 397 mappage = ctrl->map[idx];
390 sc = page_address(mappage); 398 sc = page_address(mappage);
391 sc += pos; 399 sc += pos;
392 old = xchg(&sc->id, id); 400 spin_lock_irqsave(&ctrl->lock, flags);
401 old = sc->id;
402 sc->id = id;
403 spin_unlock_irqrestore(&ctrl->lock, flags);
393 404
394 return old; 405 return old;
395} 406}
@@ -441,6 +452,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
441 mutex_lock(&swap_cgroup_mutex); 452 mutex_lock(&swap_cgroup_mutex);
442 ctrl->length = length; 453 ctrl->length = length;
443 ctrl->map = array; 454 ctrl->map = array;
455 spin_lock_init(&ctrl->lock);
444 if (swap_cgroup_prepare(type)) { 456 if (swap_cgroup_prepare(type)) {
445 /* memory shortage */ 457 /* memory shortage */
446 ctrl->map = NULL; 458 ctrl->map = NULL;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index bd33f02013ec..7b13206185ba 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,6 +378,8 @@ static void vlan_transfer_features(struct net_device *dev,
378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
380#endif 380#endif
381 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
382 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
381 383
382 if (old_features != vlandev->features) 384 if (old_features != vlandev->features)
383 netdev_features_change(vlandev); 385 netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7f4d247237e4..f7d2fe431ee0 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -361,6 +361,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
361 return ret; 361 return ret;
362} 362}
363 363
364static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
365{
366 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
367 const struct net_device_ops *ops = rdev->netdev_ops;
368
369 return ops->ndo_select_queue(rdev, skb);
370}
371
364static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 372static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
365{ 373{
366 /* TODO: gotta make sure the underlying layer can handle it, 374 /* TODO: gotta make sure the underlying layer can handle it,
@@ -688,7 +696,8 @@ static const struct header_ops vlan_header_ops = {
688 .parse = eth_header_parse, 696 .parse = eth_header_parse,
689}; 697};
690 698
691static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; 699static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
700 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
692 701
693static int vlan_dev_init(struct net_device *dev) 702static int vlan_dev_init(struct net_device *dev)
694{ 703{
@@ -722,11 +731,17 @@ static int vlan_dev_init(struct net_device *dev)
722 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 731 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
723 dev->header_ops = real_dev->header_ops; 732 dev->header_ops = real_dev->header_ops;
724 dev->hard_header_len = real_dev->hard_header_len; 733 dev->hard_header_len = real_dev->hard_header_len;
725 dev->netdev_ops = &vlan_netdev_accel_ops; 734 if (real_dev->netdev_ops->ndo_select_queue)
735 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
736 else
737 dev->netdev_ops = &vlan_netdev_accel_ops;
726 } else { 738 } else {
727 dev->header_ops = &vlan_header_ops; 739 dev->header_ops = &vlan_header_ops;
728 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 740 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
729 dev->netdev_ops = &vlan_netdev_ops; 741 if (real_dev->netdev_ops->ndo_select_queue)
742 dev->netdev_ops = &vlan_netdev_ops_sq;
743 else
744 dev->netdev_ops = &vlan_netdev_ops;
730 } 745 }
731 746
732 if (is_vlan_dev(real_dev)) 747 if (is_vlan_dev(real_dev))
@@ -865,6 +880,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
865#endif 880#endif
866}; 881};
867 882
883static const struct net_device_ops vlan_netdev_ops_sq = {
884 .ndo_select_queue = vlan_dev_select_queue,
885 .ndo_change_mtu = vlan_dev_change_mtu,
886 .ndo_init = vlan_dev_init,
887 .ndo_uninit = vlan_dev_uninit,
888 .ndo_open = vlan_dev_open,
889 .ndo_stop = vlan_dev_stop,
890 .ndo_start_xmit = vlan_dev_hard_start_xmit,
891 .ndo_validate_addr = eth_validate_addr,
892 .ndo_set_mac_address = vlan_dev_set_mac_address,
893 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
894 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
895 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
896 .ndo_do_ioctl = vlan_dev_ioctl,
897 .ndo_neigh_setup = vlan_dev_neigh_setup,
898 .ndo_get_stats = vlan_dev_get_stats,
899#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
900 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
901 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
902 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
903 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
904 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
905#endif
906};
907
908static const struct net_device_ops vlan_netdev_accel_ops_sq = {
909 .ndo_select_queue = vlan_dev_select_queue,
910 .ndo_change_mtu = vlan_dev_change_mtu,
911 .ndo_init = vlan_dev_init,
912 .ndo_uninit = vlan_dev_uninit,
913 .ndo_open = vlan_dev_open,
914 .ndo_stop = vlan_dev_stop,
915 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
916 .ndo_validate_addr = eth_validate_addr,
917 .ndo_set_mac_address = vlan_dev_set_mac_address,
918 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
919 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
920 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
921 .ndo_do_ioctl = vlan_dev_ioctl,
922 .ndo_neigh_setup = vlan_dev_neigh_setup,
923 .ndo_get_stats = vlan_dev_get_stats,
924#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
925 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
926 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
927 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
928 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
929 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
930#endif
931};
932
868void vlan_setup(struct net_device *dev) 933void vlan_setup(struct net_device *dev)
869{ 934{
870 ether_setup(dev); 935 ether_setup(dev);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index cafb55b0cea5..05fd125f74fe 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -8,8 +8,7 @@
8#include <net/bluetooth/bluetooth.h> 8#include <net/bluetooth/bluetooth.h>
9#include <net/bluetooth/hci_core.h> 9#include <net/bluetooth/hci_core.h>
10 10
11struct class *bt_class = NULL; 11static struct class *bt_class;
12EXPORT_SYMBOL_GPL(bt_class);
13 12
14struct dentry *bt_debugfs = NULL; 13struct dentry *bt_debugfs = NULL;
15EXPORT_SYMBOL_GPL(bt_debugfs); 14EXPORT_SYMBOL_GPL(bt_debugfs);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4db7ae2fe07d..99d68c34e4f1 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <linux/uaccess.h> 45#include <linux/uaccess.h>
44#include <linux/crc16.h> 46#include <linux/crc16.h>
45#include <net/sock.h> 47#include <net/sock.h>
@@ -1000,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1000 1002
1001 BT_DBG("sk %p", sk); 1003 BT_DBG("sk %p", sk);
1002 1004
1003 if (!addr || addr->sa_family != AF_BLUETOOTH) 1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1004 return -EINVAL; 1007 return -EINVAL;
1005 1008
1006 memset(&la, 0, sizeof(la)); 1009 memset(&la, 0, sizeof(la));
@@ -2830,6 +2833,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2830 int len = cmd->len - sizeof(*rsp); 2833 int len = cmd->len - sizeof(*rsp);
2831 char req[64]; 2834 char req[64];
2832 2835
2836 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2837 l2cap_send_disconn_req(conn, sk);
2838 goto done;
2839 }
2840
2833 /* throw out any old stored conf requests */ 2841 /* throw out any old stored conf requests */
2834 result = L2CAP_CONF_SUCCESS; 2842 result = L2CAP_CONF_SUCCESS;
2835 len = l2cap_parse_conf_rsp(sk, rsp->data, 2843 len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3937,31 +3945,42 @@ drop:
3937 return 0; 3945 return 0;
3938} 3946}
3939 3947
3940static ssize_t l2cap_sysfs_show(struct class *dev, 3948static int l2cap_debugfs_show(struct seq_file *f, void *p)
3941 struct class_attribute *attr,
3942 char *buf)
3943{ 3949{
3944 struct sock *sk; 3950 struct sock *sk;
3945 struct hlist_node *node; 3951 struct hlist_node *node;
3946 char *str = buf;
3947 3952
3948 read_lock_bh(&l2cap_sk_list.lock); 3953 read_lock_bh(&l2cap_sk_list.lock);
3949 3954
3950 sk_for_each(sk, node, &l2cap_sk_list.head) { 3955 sk_for_each(sk, node, &l2cap_sk_list.head) {
3951 struct l2cap_pinfo *pi = l2cap_pi(sk); 3956 struct l2cap_pinfo *pi = l2cap_pi(sk);
3952 3957
3953 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3958 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3954 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 3959 batostr(&bt_sk(sk)->src),
3955 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, 3960 batostr(&bt_sk(sk)->dst),
3956 pi->dcid, pi->imtu, pi->omtu, pi->sec_level); 3961 sk->sk_state, __le16_to_cpu(pi->psm),
3962 pi->scid, pi->dcid,
3963 pi->imtu, pi->omtu, pi->sec_level);
3957 } 3964 }
3958 3965
3959 read_unlock_bh(&l2cap_sk_list.lock); 3966 read_unlock_bh(&l2cap_sk_list.lock);
3960 3967
3961 return str - buf; 3968 return 0;
3969}
3970
3971static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3972{
3973 return single_open(file, l2cap_debugfs_show, inode->i_private);
3962} 3974}
3963 3975
3964static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 3976static const struct file_operations l2cap_debugfs_fops = {
3977 .open = l2cap_debugfs_open,
3978 .read = seq_read,
3979 .llseek = seq_lseek,
3980 .release = single_release,
3981};
3982
3983static struct dentry *l2cap_debugfs;
3965 3984
3966static const struct proto_ops l2cap_sock_ops = { 3985static const struct proto_ops l2cap_sock_ops = {
3967 .family = PF_BLUETOOTH, 3986 .family = PF_BLUETOOTH,
@@ -4021,8 +4040,12 @@ static int __init l2cap_init(void)
4021 goto error; 4040 goto error;
4022 } 4041 }
4023 4042
4024 if (class_create_file(bt_class, &class_attr_l2cap) < 0) 4043 if (bt_debugfs) {
4025 BT_ERR("Failed to create L2CAP info file"); 4044 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4045 bt_debugfs, NULL, &l2cap_debugfs_fops);
4046 if (!l2cap_debugfs)
4047 BT_ERR("Failed to create L2CAP debug file");
4048 }
4026 4049
4027 BT_INFO("L2CAP ver %s", VERSION); 4050 BT_INFO("L2CAP ver %s", VERSION);
4028 BT_INFO("L2CAP socket layer initialized"); 4051 BT_INFO("L2CAP socket layer initialized");
@@ -4036,7 +4059,7 @@ error:
4036 4059
4037static void __exit l2cap_exit(void) 4060static void __exit l2cap_exit(void)
4038{ 4061{
4039 class_remove_file(bt_class, &class_attr_l2cap); 4062 debugfs_remove(l2cap_debugfs);
4040 4063
4041 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 4064 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4042 BT_ERR("L2CAP socket unregistration failed"); 4065 BT_ERR("L2CAP socket unregistration failed");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index db8a68e1a5ba..13f114e8b0f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,6 +33,8 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
36#include <linux/net.h> 38#include <linux/net.h>
37#include <linux/mutex.h> 39#include <linux/mutex.h>
38#include <linux/kthread.h> 40#include <linux/kthread.h>
@@ -2098,13 +2100,10 @@ static struct hci_cb rfcomm_cb = {
2098 .security_cfm = rfcomm_security_cfm 2100 .security_cfm = rfcomm_security_cfm
2099}; 2101};
2100 2102
2101static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, 2103static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2102 struct class_attribute *attr,
2103 char *buf)
2104{ 2104{
2105 struct rfcomm_session *s; 2105 struct rfcomm_session *s;
2106 struct list_head *pp, *p; 2106 struct list_head *pp, *p;
2107 char *str = buf;
2108 2107
2109 rfcomm_lock(); 2108 rfcomm_lock();
2110 2109
@@ -2114,18 +2113,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev,
2114 struct sock *sk = s->sock->sk; 2113 struct sock *sk = s->sock->sk;
2115 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); 2114 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2116 2115
2117 str += sprintf(str, "%s %s %ld %d %d %d %d\n", 2116 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2118 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2117 batostr(&bt_sk(sk)->src),
2119 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); 2118 batostr(&bt_sk(sk)->dst),
2119 d->state, d->dlci, d->mtu,
2120 d->rx_credits, d->tx_credits);
2120 } 2121 }
2121 } 2122 }
2122 2123
2123 rfcomm_unlock(); 2124 rfcomm_unlock();
2124 2125
2125 return (str - buf); 2126 return 0;
2126} 2127}
2127 2128
2128static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); 2129static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
2130{
2131 return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
2132}
2133
2134static const struct file_operations rfcomm_dlc_debugfs_fops = {
2135 .open = rfcomm_dlc_debugfs_open,
2136 .read = seq_read,
2137 .llseek = seq_lseek,
2138 .release = single_release,
2139};
2140
2141static struct dentry *rfcomm_dlc_debugfs;
2129 2142
2130/* ---- Initialization ---- */ 2143/* ---- Initialization ---- */
2131static int __init rfcomm_init(void) 2144static int __init rfcomm_init(void)
@@ -2142,8 +2155,12 @@ static int __init rfcomm_init(void)
2142 goto unregister; 2155 goto unregister;
2143 } 2156 }
2144 2157
2145 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2158 if (bt_debugfs) {
2146 BT_ERR("Failed to create RFCOMM info file"); 2159 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2160 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2161 if (!rfcomm_dlc_debugfs)
2162 BT_ERR("Failed to create RFCOMM debug file");
2163 }
2147 2164
2148 err = rfcomm_init_ttys(); 2165 err = rfcomm_init_ttys();
2149 if (err < 0) 2166 if (err < 0)
@@ -2171,7 +2188,7 @@ unregister:
2171 2188
2172static void __exit rfcomm_exit(void) 2189static void __exit rfcomm_exit(void)
2173{ 2190{
2174 class_remove_file(bt_class, &class_attr_rfcomm_dlc); 2191 debugfs_remove(rfcomm_dlc_debugfs);
2175 2192
2176 hci_unregister_cb(&rfcomm_cb); 2193 hci_unregister_cb(&rfcomm_cb);
2177 2194
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index ca87d6ac6a20..8ed3c37684fa 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <net/sock.h> 45#include <net/sock.h>
44 46
45#include <asm/system.h> 47#include <asm/system.h>
@@ -395,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
395 397
396 BT_DBG("sk %p", sk); 398 BT_DBG("sk %p", sk);
397 399
398 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) 400 if (alen < sizeof(struct sockaddr_rc) ||
401 addr->sa_family != AF_BLUETOOTH)
399 return -EINVAL; 402 return -EINVAL;
400 403
401 lock_sock(sk); 404 lock_sock(sk);
@@ -1061,28 +1064,38 @@ done:
1061 return result; 1064 return result;
1062} 1065}
1063 1066
1064static ssize_t rfcomm_sock_sysfs_show(struct class *dev, 1067static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
1065 struct class_attribute *attr,
1066 char *buf)
1067{ 1068{
1068 struct sock *sk; 1069 struct sock *sk;
1069 struct hlist_node *node; 1070 struct hlist_node *node;
1070 char *str = buf;
1071 1071
1072 read_lock_bh(&rfcomm_sk_list.lock); 1072 read_lock_bh(&rfcomm_sk_list.lock);
1073 1073
1074 sk_for_each(sk, node, &rfcomm_sk_list.head) { 1074 sk_for_each(sk, node, &rfcomm_sk_list.head) {
1075 str += sprintf(str, "%s %s %d %d\n", 1075 seq_printf(f, "%s %s %d %d\n",
1076 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 1076 batostr(&bt_sk(sk)->src),
1077 batostr(&bt_sk(sk)->dst),
1077 sk->sk_state, rfcomm_pi(sk)->channel); 1078 sk->sk_state, rfcomm_pi(sk)->channel);
1078 } 1079 }
1079 1080
1080 read_unlock_bh(&rfcomm_sk_list.lock); 1081 read_unlock_bh(&rfcomm_sk_list.lock);
1081 1082
1082 return (str - buf); 1083 return 0;
1083} 1084}
1084 1085
1085static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); 1086static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
1087{
1088 return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
1089}
1090
1091static const struct file_operations rfcomm_sock_debugfs_fops = {
1092 .open = rfcomm_sock_debugfs_open,
1093 .read = seq_read,
1094 .llseek = seq_lseek,
1095 .release = single_release,
1096};
1097
1098static struct dentry *rfcomm_sock_debugfs;
1086 1099
1087static const struct proto_ops rfcomm_sock_ops = { 1100static const struct proto_ops rfcomm_sock_ops = {
1088 .family = PF_BLUETOOTH, 1101 .family = PF_BLUETOOTH,
@@ -1122,8 +1135,12 @@ int __init rfcomm_init_sockets(void)
1122 if (err < 0) 1135 if (err < 0)
1123 goto error; 1136 goto error;
1124 1137
1125 if (class_create_file(bt_class, &class_attr_rfcomm) < 0) 1138 if (bt_debugfs) {
1126 BT_ERR("Failed to create RFCOMM info file"); 1139 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1140 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1141 if (!rfcomm_sock_debugfs)
1142 BT_ERR("Failed to create RFCOMM debug file");
1143 }
1127 1144
1128 BT_INFO("RFCOMM socket layer initialized"); 1145 BT_INFO("RFCOMM socket layer initialized");
1129 1146
@@ -1137,7 +1154,7 @@ error:
1137 1154
1138void rfcomm_cleanup_sockets(void) 1155void rfcomm_cleanup_sockets(void)
1139{ 1156{
1140 class_remove_file(bt_class, &class_attr_rfcomm); 1157 debugfs_remove(rfcomm_sock_debugfs);
1141 1158
1142 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1159 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
1143 BT_ERR("RFCOMM socket layer unregistration failed"); 1160 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f93b939539bc..ca6b2ad1c3fc 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -38,6 +38,8 @@
38#include <linux/socket.h> 38#include <linux/socket.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <net/sock.h> 44#include <net/sock.h>
43 45
@@ -497,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
497 499
498 BT_DBG("sk %p", sk); 500 BT_DBG("sk %p", sk);
499 501
500 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) 502 if (alen < sizeof(struct sockaddr_sco) ||
503 addr->sa_family != AF_BLUETOOTH)
501 return -EINVAL; 504 return -EINVAL;
502 505
503 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) 506 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
@@ -953,28 +956,36 @@ drop:
953 return 0; 956 return 0;
954} 957}
955 958
956static ssize_t sco_sysfs_show(struct class *dev, 959static int sco_debugfs_show(struct seq_file *f, void *p)
957 struct class_attribute *attr,
958 char *buf)
959{ 960{
960 struct sock *sk; 961 struct sock *sk;
961 struct hlist_node *node; 962 struct hlist_node *node;
962 char *str = buf;
963 963
964 read_lock_bh(&sco_sk_list.lock); 964 read_lock_bh(&sco_sk_list.lock);
965 965
966 sk_for_each(sk, node, &sco_sk_list.head) { 966 sk_for_each(sk, node, &sco_sk_list.head) {
967 str += sprintf(str, "%s %s %d\n", 967 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
968 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 968 batostr(&bt_sk(sk)->dst), sk->sk_state);
969 sk->sk_state);
970 } 969 }
971 970
972 read_unlock_bh(&sco_sk_list.lock); 971 read_unlock_bh(&sco_sk_list.lock);
973 972
974 return (str - buf); 973 return 0;
975} 974}
976 975
977static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); 976static int sco_debugfs_open(struct inode *inode, struct file *file)
977{
978 return single_open(file, sco_debugfs_show, inode->i_private);
979}
980
981static const struct file_operations sco_debugfs_fops = {
982 .open = sco_debugfs_open,
983 .read = seq_read,
984 .llseek = seq_lseek,
985 .release = single_release,
986};
987
988static struct dentry *sco_debugfs;
978 989
979static const struct proto_ops sco_sock_ops = { 990static const struct proto_ops sco_sock_ops = {
980 .family = PF_BLUETOOTH, 991 .family = PF_BLUETOOTH,
@@ -1032,8 +1043,12 @@ static int __init sco_init(void)
1032 goto error; 1043 goto error;
1033 } 1044 }
1034 1045
1035 if (class_create_file(bt_class, &class_attr_sco) < 0) 1046 if (bt_debugfs) {
1036 BT_ERR("Failed to create SCO info file"); 1047 sco_debugfs = debugfs_create_file("sco", 0444,
1048 bt_debugfs, NULL, &sco_debugfs_fops);
1049 if (!sco_debugfs)
1050 BT_ERR("Failed to create SCO debug file");
1051 }
1037 1052
1038 BT_INFO("SCO (Voice Link) ver %s", VERSION); 1053 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1039 BT_INFO("SCO socket layer initialized"); 1054 BT_INFO("SCO socket layer initialized");
@@ -1047,7 +1062,7 @@ error:
1047 1062
1048static void __exit sco_exit(void) 1063static void __exit sco_exit(void)
1049{ 1064{
1050 class_remove_file(bt_class, &class_attr_sco); 1065 debugfs_remove(sco_debugfs);
1051 1066
1052 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1067 if (bt_sock_unregister(BTPROTO_SCO) < 0)
1053 BT_ERR("SCO socket unregistration failed"); 1068 BT_ERR("SCO socket unregistration failed");
diff --git a/net/can/bcm.c b/net/can/bcm.c
index e32af52238a2..629ad1debe81 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1478,6 +1478,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1478 struct sock *sk = sock->sk; 1478 struct sock *sk = sock->sk;
1479 struct bcm_sock *bo = bcm_sk(sk); 1479 struct bcm_sock *bo = bcm_sk(sk);
1480 1480
1481 if (len < sizeof(*addr))
1482 return -EINVAL;
1483
1481 if (bo->bound) 1484 if (bo->bound)
1482 return -EISCONN; 1485 return -EISCONN;
1483 1486
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d4ec38fa64e6..6f9206b36dc2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -614,7 +614,7 @@ void netpoll_print_options(struct netpoll *np)
614 np->name, np->local_port); 614 np->name, np->local_port);
615 printk(KERN_INFO "%s: local IP %pI4\n", 615 printk(KERN_INFO "%s: local IP %pI4\n",
616 np->name, &np->local_ip); 616 np->name, &np->local_ip);
617 printk(KERN_INFO "%s: interface %s\n", 617 printk(KERN_INFO "%s: interface '%s'\n",
618 np->name, np->dev_name); 618 np->name, np->dev_name);
619 printk(KERN_INFO "%s: remote port %d\n", 619 printk(KERN_INFO "%s: remote port %d\n",
620 np->name, np->remote_port); 620 np->name, np->remote_port);
@@ -661,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
661 if ((delim = strchr(cur, '@')) == NULL) 661 if ((delim = strchr(cur, '@')) == NULL)
662 goto parse_failed; 662 goto parse_failed;
663 *delim = 0; 663 *delim = 0;
664 if (*cur == ' ' || *cur == '\t')
665 printk(KERN_INFO "%s: warning: whitespace"
666 "is not allowed\n", np->name);
664 np->remote_port = simple_strtol(cur, NULL, 10); 667 np->remote_port = simple_strtol(cur, NULL, 10);
665 cur = delim; 668 cur = delim;
666 } 669 }
@@ -708,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
708 return 0; 711 return 0;
709 712
710 parse_failed: 713 parse_failed:
711 printk(KERN_INFO "%s: couldn't parse config at %s!\n", 714 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
712 np->name, cur); 715 np->name, cur);
713 return -1; 716 return -1;
714} 717}
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index bad1c49fd960..01beb6c11205 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -126,6 +126,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
126{ 126{
127 struct sock *sk = sock->sk; 127 struct sock *sk = sock->sk;
128 128
129 if (addr_len < sizeof(uaddr->sa_family))
130 return -EINVAL;
131
129 if (uaddr->sa_family == AF_UNSPEC) 132 if (uaddr->sa_family == AF_UNSPEC)
130 return sk->sk_prot->disconnect(sk, flags); 133 return sk->sk_prot->disconnect(sk, flags);
131 134
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 55e11906a73a..b5924f178812 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -530,6 +530,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
530{ 530{
531 struct sock *sk = sock->sk; 531 struct sock *sk = sock->sk;
532 532
533 if (addr_len < sizeof(uaddr->sa_family))
534 return -EINVAL;
533 if (uaddr->sa_family == AF_UNSPEC) 535 if (uaddr->sa_family == AF_UNSPEC)
534 return sk->sk_prot->disconnect(sk, flags); 536 return sk->sk_prot->disconnect(sk, flags);
535 537
@@ -573,6 +575,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
573 int err; 575 int err;
574 long timeo; 576 long timeo;
575 577
578 if (addr_len < sizeof(uaddr->sa_family))
579 return -EINVAL;
580
576 lock_sock(sk); 581 lock_sock(sk);
577 582
578 if (uaddr->sa_family == AF_UNSPEC) { 583 if (uaddr->sa_family == AF_UNSPEC) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c75320ef95c2..d009c6a5d9ad 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1195 if (idx < s_idx) 1195 if (idx < s_idx)
1196 goto cont; 1196 goto cont;
1197 if (idx > s_idx) 1197 if (h > s_h || idx > s_idx)
1198 s_ip_idx = 0; 1198 s_ip_idx = 0;
1199 in_dev = __in_dev_get_rcu(dev); 1199 in_dev = __in_dev_get_rcu(dev);
1200 if (!in_dev) 1200 if (!in_dev)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index af5d89792860..01ef8ba9025c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -961,7 +961,9 @@ fib_find_node(struct trie *t, u32 key)
961 struct node *n; 961 struct node *n;
962 962
963 pos = 0; 963 pos = 0;
964 n = rcu_dereference(t->trie); 964 n = rcu_dereference_check(t->trie,
965 rcu_read_lock_held() ||
966 lockdep_rtnl_is_held());
965 967
966 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 968 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
967 tn = (struct tnode *) n; 969 tn = (struct tnode *) n;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f47c9f76754b..f78402d097b3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
810 tunnel->err_count = 0; 810 tunnel->err_count = 0;
811 } 811 }
812 812
813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; 813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
814 814
815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
818 if (max_headroom > dev->needed_headroom)
819 dev->needed_headroom = max_headroom;
818 if (!new_skb) { 820 if (!new_skb) {
819 ip_rt_put(rt); 821 ip_rt_put(rt);
820 txq->tx_dropped++; 822 txq->tx_dropped++;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 0b9d03c54dc3..d0a6092a67be 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1616,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1616 int ct; 1616 int ct;
1617 struct rtnexthop *nhp; 1617 struct rtnexthop *nhp;
1618 struct net *net = mfc_net(c); 1618 struct net *net = mfc_net(c);
1619 struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
1620 u8 *b = skb_tail_pointer(skb); 1619 u8 *b = skb_tail_pointer(skb);
1621 struct rtattr *mp_head; 1620 struct rtattr *mp_head;
1622 1621
1623 if (dev) 1622 /* If cache is unresolved, don't try to parse IIF and OIF */
1624 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1623 if (c->mfc_parent > MAXVIFS)
1624 return -ENOENT;
1625
1626 if (VIF_EXISTS(net, c->mfc_parent))
1627 RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
1625 1628
1626 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1629 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1627 1630
1628 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1631 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1629 if (c->mfc_un.res.ttls[ct] < 255) { 1632 if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1630 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1633 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1631 goto rtattr_failure; 1634 goto rtattr_failure;
1632 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1635 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 32d396196df8..d413b57be9b3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1097,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head)
1097} 1097}
1098 1098
1099static int rt_intern_hash(unsigned hash, struct rtable *rt, 1099static int rt_intern_hash(unsigned hash, struct rtable *rt,
1100 struct rtable **rp, struct sk_buff *skb) 1100 struct rtable **rp, struct sk_buff *skb, int ifindex)
1101{ 1101{
1102 struct rtable *rth, **rthp; 1102 struct rtable *rth, **rthp;
1103 unsigned long now; 1103 unsigned long now;
@@ -1212,11 +1212,16 @@ restart:
1212 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { 1212 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1213 struct net *net = dev_net(rt->u.dst.dev); 1213 struct net *net = dev_net(rt->u.dst.dev);
1214 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1214 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1215 if (!rt_caching(dev_net(rt->u.dst.dev))) { 1215 if (!rt_caching(net)) {
1216 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1216 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1217 rt->u.dst.dev->name, num); 1217 rt->u.dst.dev->name, num);
1218 } 1218 }
1219 rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); 1219 rt_emergency_hash_rebuild(net);
1220 spin_unlock_bh(rt_hash_lock_addr(hash));
1221
1222 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1223 ifindex, rt_genid(net));
1224 goto restart;
1220 } 1225 }
1221 } 1226 }
1222 1227
@@ -1477,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1477 &netevent); 1482 &netevent);
1478 1483
1479 rt_del(hash, rth); 1484 rt_del(hash, rth);
1480 if (!rt_intern_hash(hash, rt, &rt, NULL)) 1485 if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
1481 ip_rt_put(rt); 1486 ip_rt_put(rt);
1482 goto do_next; 1487 goto do_next;
1483 } 1488 }
@@ -1510,7 +1515,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1510 ip_rt_put(rt); 1515 ip_rt_put(rt);
1511 ret = NULL; 1516 ret = NULL;
1512 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1517 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1513 rt->u.dst.expires) { 1518 (rt->u.dst.expires &&
1519 time_after_eq(jiffies, rt->u.dst.expires))) {
1514 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1520 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1515 rt->fl.oif, 1521 rt->fl.oif,
1516 rt_genid(dev_net(dst->dev))); 1522 rt_genid(dev_net(dst->dev)));
@@ -1930,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1930 1936
1931 in_dev_put(in_dev); 1937 in_dev_put(in_dev);
1932 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1938 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1933 return rt_intern_hash(hash, rth, NULL, skb); 1939 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1934 1940
1935e_nobufs: 1941e_nobufs:
1936 in_dev_put(in_dev); 1942 in_dev_put(in_dev);
@@ -2097,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2097 /* put it into the cache */ 2103 /* put it into the cache */
2098 hash = rt_hash(daddr, saddr, fl->iif, 2104 hash = rt_hash(daddr, saddr, fl->iif,
2099 rt_genid(dev_net(rth->u.dst.dev))); 2105 rt_genid(dev_net(rth->u.dst.dev)));
2100 return rt_intern_hash(hash, rth, NULL, skb); 2106 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2101} 2107}
2102 2108
2103/* 2109/*
@@ -2254,7 +2260,7 @@ local_input:
2254 } 2260 }
2255 rth->rt_type = res.type; 2261 rth->rt_type = res.type;
2256 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2262 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2257 err = rt_intern_hash(hash, rth, NULL, skb); 2263 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
2258 goto done; 2264 goto done;
2259 2265
2260no_route: 2266no_route:
@@ -2501,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp,
2501 if (err == 0) { 2507 if (err == 0) {
2502 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2508 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2503 rt_genid(dev_net(dev_out))); 2509 rt_genid(dev_net(dev_out)));
2504 err = rt_intern_hash(hash, rth, rp, NULL); 2510 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2505 } 2511 }
2506 2512
2507 return err; 2513 return err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6afb6d8662b2..2c75f891914e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1368 sk_eat_skb(sk, skb, 0); 1368 sk_eat_skb(sk, skb, 0);
1369 if (!desc->count) 1369 if (!desc->count)
1370 break; 1370 break;
1371 tp->copied_seq = seq;
1371 } 1372 }
1372 tp->copied_seq = seq; 1373 tp->copied_seq = seq;
1373 1374
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 21b4c9e1a682..1c58b99a54a4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3607,7 +3607,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3607 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 3607 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3608 if (idx < s_idx) 3608 if (idx < s_idx)
3609 goto cont; 3609 goto cont;
3610 if (idx > s_idx) 3610 if (h > s_h || idx > s_idx)
3611 s_ip_idx = 0; 3611 s_ip_idx = 0;
3612 ip_idx = 0; 3612 ip_idx = 0;
3613 idev = __in6_dev_get(dev); 3613 idev = __in6_dev_get(dev);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 23e4ac0cc30e..27acfb58650a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1695,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1695 int ct; 1695 int ct;
1696 struct rtnexthop *nhp; 1696 struct rtnexthop *nhp;
1697 struct net *net = mfc6_net(c); 1697 struct net *net = mfc6_net(c);
1698 struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
1699 u8 *b = skb_tail_pointer(skb); 1698 u8 *b = skb_tail_pointer(skb);
1700 struct rtattr *mp_head; 1699 struct rtattr *mp_head;
1701 1700
1702 if (dev) 1701 /* If cache is unresolved, don't try to parse IIF and OIF */
1703 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1702 if (c->mf6c_parent > MAXMIFS)
1703 return -ENOENT;
1704
1705 if (MIF_EXISTS(net, c->mf6c_parent))
1706 RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
1704 1707
1705 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1708 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1706 1709
1707 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1710 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1708 if (c->mfc_un.res.ttls[ct] < 255) { 1711 if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1709 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1712 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1710 goto rtattr_failure; 1713 goto rtattr_failure;
1711 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1714 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index aef31a29de9e..b9cf7cd61923 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -13,7 +13,7 @@ static const struct xt_table packet_raw = {
13 .valid_hooks = RAW_VALID_HOOKS, 13 .valid_hooks = RAW_VALID_HOOKS,
14 .me = THIS_MODULE, 14 .me = THIS_MODULE,
15 .af = NFPROTO_IPV6, 15 .af = NFPROTO_IPV6,
16 .priority = NF_IP6_PRI_FIRST, 16 .priority = NF_IP6_PRI_RAW,
17}; 17};
18 18
19/* The work comes in here from netfilter.c. */ 19/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7fcb0e5d1213..0d7713c5c206 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -890,12 +890,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
890 struct rt6_info *rt = (struct rt6_info *) dst; 890 struct rt6_info *rt = (struct rt6_info *) dst;
891 891
892 if (rt) { 892 if (rt) {
893 if (rt->rt6i_flags & RTF_CACHE) 893 if (rt->rt6i_flags & RTF_CACHE) {
894 ip6_del_rt(rt); 894 if (rt6_check_expired(rt)) {
895 else 895 ip6_del_rt(rt);
896 dst = NULL;
897 }
898 } else {
896 dst_release(dst); 899 dst_release(dst);
900 dst = NULL;
901 }
897 } 902 }
898 return NULL; 903 return dst;
899} 904}
900 905
901static void ip6_link_failure(struct sk_buff *skb) 906static void ip6_link_failure(struct sk_buff *skb)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 368707882647..344145f23c34 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2129,10 +2129,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2129 int err; 2129 int err;
2130 2130
2131 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2131 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2132 if (IS_ERR(out_skb)) { 2132 if (IS_ERR(out_skb))
2133 err = PTR_ERR(out_skb); 2133 return PTR_ERR(out_skb);
2134 goto out; 2134
2135 }
2136 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2135 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2137 if (err < 0) 2136 if (err < 0)
2138 return err; 2137 return err;
@@ -2148,7 +2147,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2148 out_hdr->sadb_msg_seq = c->seq; 2147 out_hdr->sadb_msg_seq = c->seq;
2149 out_hdr->sadb_msg_pid = c->pid; 2148 out_hdr->sadb_msg_pid = c->pid;
2150 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); 2149 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2151out:
2152 return 0; 2150 return 0;
2153 2151
2154} 2152}
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index ce84237ebad3..ccff6133e19a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
391 if (SN_GT(mpath->sn, orig_sn) || 391 if (SN_GT(mpath->sn, orig_sn) ||
392 (mpath->sn == orig_sn && 392 (mpath->sn == orig_sn &&
393 action == MPATH_PREQ && 393 action == MPATH_PREQ &&
394 new_metric > mpath->metric)) { 394 new_metric >= mpath->metric)) {
395 process = false; 395 process = false;
396 fresh_info = false; 396 fresh_info = false;
397 } 397 }
@@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
611 611
612 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 612 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
613 cpu_to_le32(orig_sn), 0, target_addr, 613 cpu_to_le32(orig_sn), 0, target_addr,
614 cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, 614 cpu_to_le32(target_sn), next_hop, hopcount,
615 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 615 ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
616 0, sdata); 616 0, sdata);
617 rcu_read_unlock(); 617 rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 08e1f17a4226..db25fa9ef135 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1991,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1991void ieee80211_tx_pending(unsigned long data) 1991void ieee80211_tx_pending(unsigned long data)
1992{ 1992{
1993 struct ieee80211_local *local = (struct ieee80211_local *)data; 1993 struct ieee80211_local *local = (struct ieee80211_local *)data;
1994 struct ieee80211_sub_if_data *sdata;
1994 unsigned long flags; 1995 unsigned long flags;
1995 int i; 1996 int i;
1996 bool txok; 1997 bool txok;
@@ -2027,6 +2028,11 @@ void ieee80211_tx_pending(unsigned long data)
2027 if (!txok) 2028 if (!txok)
2028 break; 2029 break;
2029 } 2030 }
2031
2032 if (skb_queue_empty(&local->pending[i]))
2033 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2034 netif_tx_wake_queue(
2035 netdev_get_tx_queue(sdata->dev, i));
2030 } 2036 }
2031 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2037 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2032 2038
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c453226f06b2..53af57047435 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
279 /* someone still has this queue stopped */ 279 /* someone still has this queue stopped */
280 return; 280 return;
281 281
282 if (!skb_queue_empty(&local->pending[queue])) 282 if (skb_queue_empty(&local->pending[queue])) {
283 rcu_read_lock();
284 list_for_each_entry_rcu(sdata, &local->interfaces, list)
285 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
286 rcu_read_unlock();
287 } else
283 tasklet_schedule(&local->tx_pending_tasklet); 288 tasklet_schedule(&local->tx_pending_tasklet);
284
285 rcu_read_lock();
286 list_for_each_entry_rcu(sdata, &local->interfaces, list)
287 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
288 rcu_read_unlock();
289} 289}
290 290
291void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 291void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -1097,9 +1097,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1097 */ 1097 */
1098 res = drv_start(local); 1098 res = drv_start(local);
1099 if (res) { 1099 if (res) {
1100 WARN(local->suspended, "Harware became unavailable " 1100 WARN(local->suspended, "Hardware became unavailable "
1101 "upon resume. This is could be a software issue" 1101 "upon resume. This could be a software issue "
1102 "prior to suspend or a hardware issue\n"); 1102 "prior to suspend or a hardware issue.\n");
1103 return res; 1103 return res;
1104 } 1104 }
1105 1105
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9e9c48963942..215a64835de8 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -493,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
493 case 64 ... 95: 493 case 64 ... 95:
494 i[2] = maskl(i[2], p - 64); 494 i[2] = maskl(i[2], p - 64);
495 i[3] = 0; 495 i[3] = 0;
496 break;
496 case 96 ... 127: 497 case 96 ... 127:
497 i[3] = maskl(i[3], p - 96); 498 i[3] = maskl(i[3], p - 96);
498 break; 499 break;
@@ -879,7 +880,8 @@ static void dl_seq_stop(struct seq_file *s, void *v)
879 struct xt_hashlimit_htable *htable = s->private; 880 struct xt_hashlimit_htable *htable = s->private;
880 unsigned int *bucket = (unsigned int *)v; 881 unsigned int *bucket = (unsigned int *)v;
881 882
882 kfree(bucket); 883 if (!IS_ERR(bucket))
884 kfree(bucket);
883 spin_unlock_bh(&htable->lock); 885 spin_unlock_bh(&htable->lock);
884} 886}
885 887
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 7073dbb8100c..971d172afece 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -267,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
267 for (i = 0; i < e->nstamps; i++) { 267 for (i = 0; i < e->nstamps; i++) {
268 if (info->seconds && time_after(time, e->stamps[i])) 268 if (info->seconds && time_after(time, e->stamps[i]))
269 continue; 269 continue;
270 if (info->hit_count && ++hits >= info->hit_count) { 270 if (!info->hit_count || ++hits >= info->hit_count) {
271 ret = !ret; 271 ret = !ret;
272 break; 272 break;
273 } 273 }
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 0bfeaab88ef5..06ab41b6b57a 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -50,9 +50,12 @@ struct netlbl_domhsh_tbl {
50}; 50};
51 51
52/* Domain hash table */ 52/* Domain hash table */
53/* XXX - updates should be so rare that having one spinlock for the entire 53/* updates should be so rare that having one spinlock for the entire hash table
54 * hash table should be okay */ 54 * should be okay */
55static DEFINE_SPINLOCK(netlbl_domhsh_lock); 55static DEFINE_SPINLOCK(netlbl_domhsh_lock);
56#define netlbl_domhsh_rcu_deref(p) \
57 rcu_dereference_check(p, rcu_read_lock_held() || \
58 lockdep_is_held(&netlbl_domhsh_lock))
56static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; 59static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL;
57static struct netlbl_dom_map *netlbl_domhsh_def = NULL; 60static struct netlbl_dom_map *netlbl_domhsh_def = NULL;
58 61
@@ -106,7 +109,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
106 * Description: 109 * Description:
107 * This is the hashing function for the domain hash table, it returns the 110 * This is the hashing function for the domain hash table, it returns the
108 * correct bucket number for the domain. The caller is responsibile for 111 * correct bucket number for the domain. The caller is responsibile for
109 * calling the rcu_read_[un]lock() functions. 112 * ensuring that the hash table is protected with either a RCU read lock or the
113 * hash table lock.
110 * 114 *
111 */ 115 */
112static u32 netlbl_domhsh_hash(const char *key) 116static u32 netlbl_domhsh_hash(const char *key)
@@ -120,7 +124,7 @@ static u32 netlbl_domhsh_hash(const char *key)
120 124
121 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) 125 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
122 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; 126 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
123 return val & (rcu_dereference(netlbl_domhsh)->size - 1); 127 return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1);
124} 128}
125 129
126/** 130/**
@@ -130,7 +134,8 @@ static u32 netlbl_domhsh_hash(const char *key)
130 * Description: 134 * Description:
131 * Searches the domain hash table and returns a pointer to the hash table 135 * Searches the domain hash table and returns a pointer to the hash table
132 * entry if found, otherwise NULL is returned. The caller is responsibile for 136 * entry if found, otherwise NULL is returned. The caller is responsibile for
133 * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). 137 * ensuring that the hash table is protected with either a RCU read lock or the
138 * hash table lock.
134 * 139 *
135 */ 140 */
136static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) 141static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
@@ -141,7 +146,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
141 146
142 if (domain != NULL) { 147 if (domain != NULL) {
143 bkt = netlbl_domhsh_hash(domain); 148 bkt = netlbl_domhsh_hash(domain);
144 bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; 149 bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
145 list_for_each_entry_rcu(iter, bkt_list, list) 150 list_for_each_entry_rcu(iter, bkt_list, list)
146 if (iter->valid && strcmp(iter->domain, domain) == 0) 151 if (iter->valid && strcmp(iter->domain, domain) == 0)
147 return iter; 152 return iter;
@@ -159,8 +164,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
159 * Searches the domain hash table and returns a pointer to the hash table 164 * Searches the domain hash table and returns a pointer to the hash table
160 * entry if an exact match is found, if an exact match is not present in the 165 * entry if an exact match is found, if an exact match is not present in the
161 * hash table then the default entry is returned if valid otherwise NULL is 166 * hash table then the default entry is returned if valid otherwise NULL is
162 * returned. The caller is responsibile for the rcu hash table locks 167 * returned. The caller is responsibile ensuring that the hash table is
163 * (i.e. the caller much call rcu_read_[un]lock()). 168 * protected with either a RCU read lock or the hash table lock.
164 * 169 *
165 */ 170 */
166static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) 171static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
@@ -169,7 +174,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
169 174
170 entry = netlbl_domhsh_search(domain); 175 entry = netlbl_domhsh_search(domain);
171 if (entry == NULL) { 176 if (entry == NULL) {
172 entry = rcu_dereference(netlbl_domhsh_def); 177 entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def);
173 if (entry != NULL && !entry->valid) 178 if (entry != NULL && !entry->valid)
174 entry = NULL; 179 entry = NULL;
175 } 180 }
@@ -306,8 +311,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
306 struct netlbl_af6list *tmp6; 311 struct netlbl_af6list *tmp6;
307#endif /* IPv6 */ 312#endif /* IPv6 */
308 313
314 /* XXX - we can remove this RCU read lock as the spinlock protects the
315 * entire function, but before we do we need to fixup the
316 * netlbl_af[4,6]list RCU functions to do "the right thing" with
317 * respect to rcu_dereference() when only a spinlock is held. */
309 rcu_read_lock(); 318 rcu_read_lock();
310
311 spin_lock(&netlbl_domhsh_lock); 319 spin_lock(&netlbl_domhsh_lock);
312 if (entry->domain != NULL) 320 if (entry->domain != NULL)
313 entry_old = netlbl_domhsh_search(entry->domain); 321 entry_old = netlbl_domhsh_search(entry->domain);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 852d9d7976b9..3b4fde7622a3 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -114,6 +114,9 @@ struct netlbl_unlhsh_walk_arg {
114/* updates should be so rare that having one spinlock for the entire 114/* updates should be so rare that having one spinlock for the entire
115 * hash table should be okay */ 115 * hash table should be okay */
116static DEFINE_SPINLOCK(netlbl_unlhsh_lock); 116static DEFINE_SPINLOCK(netlbl_unlhsh_lock);
117#define netlbl_unlhsh_rcu_deref(p) \
118 rcu_dereference_check(p, rcu_read_lock_held() || \
119 lockdep_is_held(&netlbl_unlhsh_lock))
117static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; 120static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL;
118static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; 121static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL;
119 122
@@ -235,15 +238,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
235 * Description: 238 * Description:
236 * This is the hashing function for the unlabeled hash table, it returns the 239 * This is the hashing function for the unlabeled hash table, it returns the
237 * bucket number for the given device/interface. The caller is responsible for 240 * bucket number for the given device/interface. The caller is responsible for
238 * calling the rcu_read_[un]lock() functions. 241 * ensuring that the hash table is protected with either a RCU read lock or
242 * the hash table lock.
239 * 243 *
240 */ 244 */
241static u32 netlbl_unlhsh_hash(int ifindex) 245static u32 netlbl_unlhsh_hash(int ifindex)
242{ 246{
243 /* this is taken _almost_ directly from 247 return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1);
244 * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much
245 * the same thing */
246 return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1);
247} 248}
248 249
249/** 250/**
@@ -253,7 +254,8 @@ static u32 netlbl_unlhsh_hash(int ifindex)
253 * Description: 254 * Description:
254 * Searches the unlabeled connection hash table and returns a pointer to the 255 * Searches the unlabeled connection hash table and returns a pointer to the
255 * interface entry which matches @ifindex, otherwise NULL is returned. The 256 * interface entry which matches @ifindex, otherwise NULL is returned. The
256 * caller is responsible for calling the rcu_read_[un]lock() functions. 257 * caller is responsible for ensuring that the hash table is protected with
258 * either a RCU read lock or the hash table lock.
257 * 259 *
258 */ 260 */
259static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) 261static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
@@ -263,7 +265,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
263 struct netlbl_unlhsh_iface *iter; 265 struct netlbl_unlhsh_iface *iter;
264 266
265 bkt = netlbl_unlhsh_hash(ifindex); 267 bkt = netlbl_unlhsh_hash(ifindex);
266 bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; 268 bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];
267 list_for_each_entry_rcu(iter, bkt_list, list) 269 list_for_each_entry_rcu(iter, bkt_list, list)
268 if (iter->valid && iter->ifindex == ifindex) 270 if (iter->valid && iter->ifindex == ifindex)
269 return iter; 271 return iter;
@@ -272,33 +274,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
272} 274}
273 275
274/** 276/**
275 * netlbl_unlhsh_search_iface_def - Search for a matching interface entry
276 * @ifindex: the network interface
277 *
278 * Description:
279 * Searches the unlabeled connection hash table and returns a pointer to the
280 * interface entry which matches @ifindex. If an exact match can not be found
281 * and there is a valid default entry, the default entry is returned, otherwise
282 * NULL is returned. The caller is responsible for calling the
283 * rcu_read_[un]lock() functions.
284 *
285 */
286static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
287{
288 struct netlbl_unlhsh_iface *entry;
289
290 entry = netlbl_unlhsh_search_iface(ifindex);
291 if (entry != NULL)
292 return entry;
293
294 entry = rcu_dereference(netlbl_unlhsh_def);
295 if (entry != NULL && entry->valid)
296 return entry;
297
298 return NULL;
299}
300
301/**
302 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table 277 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table
303 * @iface: the associated interface entry 278 * @iface: the associated interface entry
304 * @addr: IPv4 address in network byte order 279 * @addr: IPv4 address in network byte order
@@ -308,8 +283,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex)
308 * Description: 283 * Description:
309 * Add a new address entry into the unlabeled connection hash table using the 284 * Add a new address entry into the unlabeled connection hash table using the
310 * interface entry specified by @iface. On success zero is returned, otherwise 285 * interface entry specified by @iface. On success zero is returned, otherwise
311 * a negative value is returned. The caller is responsible for calling the 286 * a negative value is returned.
312 * rcu_read_[un]lock() functions.
313 * 287 *
314 */ 288 */
315static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, 289static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
@@ -349,8 +323,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
349 * Description: 323 * Description:
350 * Add a new address entry into the unlabeled connection hash table using the 324 * Add a new address entry into the unlabeled connection hash table using the
351 * interface entry specified by @iface. On success zero is returned, otherwise 325 * interface entry specified by @iface. On success zero is returned, otherwise
352 * a negative value is returned. The caller is responsible for calling the 326 * a negative value is returned.
353 * rcu_read_[un]lock() functions.
354 * 327 *
355 */ 328 */
356static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, 329static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
@@ -391,8 +364,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
391 * Description: 364 * Description:
392 * Add a new, empty, interface entry into the unlabeled connection hash table. 365 * Add a new, empty, interface entry into the unlabeled connection hash table.
393 * On success a pointer to the new interface entry is returned, on failure NULL 366 * On success a pointer to the new interface entry is returned, on failure NULL
394 * is returned. The caller is responsible for calling the rcu_read_[un]lock() 367 * is returned.
395 * functions.
396 * 368 *
397 */ 369 */
398static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) 370static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
@@ -415,10 +387,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
415 if (netlbl_unlhsh_search_iface(ifindex) != NULL) 387 if (netlbl_unlhsh_search_iface(ifindex) != NULL)
416 goto add_iface_failure; 388 goto add_iface_failure;
417 list_add_tail_rcu(&iface->list, 389 list_add_tail_rcu(&iface->list,
418 &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); 390 &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);
419 } else { 391 } else {
420 INIT_LIST_HEAD(&iface->list); 392 INIT_LIST_HEAD(&iface->list);
421 if (rcu_dereference(netlbl_unlhsh_def) != NULL) 393 if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
422 goto add_iface_failure; 394 goto add_iface_failure;
423 rcu_assign_pointer(netlbl_unlhsh_def, iface); 395 rcu_assign_pointer(netlbl_unlhsh_def, iface);
424 } 396 }
@@ -548,8 +520,7 @@ unlhsh_add_return:
548 * 520 *
549 * Description: 521 * Description:
550 * Remove an IP address entry from the unlabeled connection hash table. 522 * Remove an IP address entry from the unlabeled connection hash table.
551 * Returns zero on success, negative values on failure. The caller is 523 * Returns zero on success, negative values on failure.
552 * responsible for calling the rcu_read_[un]lock() functions.
553 * 524 *
554 */ 525 */
555static int netlbl_unlhsh_remove_addr4(struct net *net, 526static int netlbl_unlhsh_remove_addr4(struct net *net,
@@ -611,8 +582,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
611 * 582 *
612 * Description: 583 * Description:
613 * Remove an IP address entry from the unlabeled connection hash table. 584 * Remove an IP address entry from the unlabeled connection hash table.
614 * Returns zero on success, negative values on failure. The caller is 585 * Returns zero on success, negative values on failure.
615 * responsible for calling the rcu_read_[un]lock() functions.
616 * 586 *
617 */ 587 */
618static int netlbl_unlhsh_remove_addr6(struct net *net, 588static int netlbl_unlhsh_remove_addr6(struct net *net,
@@ -1547,8 +1517,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
1547 struct netlbl_unlhsh_iface *iface; 1517 struct netlbl_unlhsh_iface *iface;
1548 1518
1549 rcu_read_lock(); 1519 rcu_read_lock();
1550 iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); 1520 iface = netlbl_unlhsh_search_iface(skb->skb_iif);
1551 if (iface == NULL) 1521 if (iface == NULL)
1522 iface = rcu_dereference(netlbl_unlhsh_def);
1523 if (iface == NULL || !iface->valid)
1552 goto unlabel_getattr_nolabel; 1524 goto unlabel_getattr_nolabel;
1553 switch (family) { 1525 switch (family) {
1554 case PF_INET: { 1526 case PF_INET: {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 274d977166b7..6464a1972a69 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
683 struct netlink_sock *nlk = nlk_sk(sk); 683 struct netlink_sock *nlk = nlk_sk(sk);
684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 684 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
685 685
686 if (alen < sizeof(addr->sa_family))
687 return -EINVAL;
688
686 if (addr->sa_family == AF_UNSPEC) { 689 if (addr->sa_family == AF_UNSPEC) {
687 sk->sk_state = NETLINK_UNCONNECTED; 690 sk->sk_state = NETLINK_UNCONNECTED;
688 nlk->dst_pid = 0; 691 nlk->dst_pid = 0;
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 77228f28fa36..2d744f22a9a1 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -88,6 +88,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
88 88
89 /* get a notification message to send to the server app */ 89 /* get a notification message to send to the server app */
90 notification = alloc_skb(0, GFP_NOFS); 90 notification = alloc_skb(0, GFP_NOFS);
91 if (!notification) {
92 _debug("no memory");
93 ret = -ENOMEM;
94 goto error_nofree;
95 }
91 rxrpc_new_skb(notification); 96 rxrpc_new_skb(notification);
92 notification->mark = RXRPC_SKB_MARK_NEW_CALL; 97 notification->mark = RXRPC_SKB_MARK_NEW_CALL;
93 98
@@ -189,6 +194,7 @@ invalid_service:
189 ret = -ECONNREFUSED; 194 ret = -ECONNREFUSED;
190error: 195error:
191 rxrpc_free_skb(notification); 196 rxrpc_free_skb(notification);
197error_nofree:
192 _leave(" = %d", ret); 198 _leave(" = %d", ret);
193 return ret; 199 return ret;
194} 200}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 21f9c7678aa3..2f691fb180d1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -328,13 +328,16 @@ config NET_CLS_FLOW
328 module will be called cls_flow. 328 module will be called cls_flow.
329 329
330config NET_CLS_CGROUP 330config NET_CLS_CGROUP
331 bool "Control Group Classifier" 331 tristate "Control Group Classifier"
332 select NET_CLS 332 select NET_CLS
333 depends on CGROUPS 333 depends on CGROUPS
334 ---help--- 334 ---help---
335 Say Y here if you want to classify packets based on the control 335 Say Y here if you want to classify packets based on the control
336 cgroup of their process. 336 cgroup of their process.
337 337
338 To compile this code as a module, choose M here: the
339 module will be called cls_cgroup.
340
338config NET_EMATCH 341config NET_EMATCH
339 bool "Extended Matches" 342 bool "Extended Matches"
340 select NET_CLS 343 select NET_CLS
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e4877ca6727c..7f27d2c15e08 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,6 +24,25 @@ struct cgroup_cls_state
24 u32 classid; 24 u32 classid;
25}; 25};
26 26
27static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
28 struct cgroup *cgrp);
29static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
30static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
31
32struct cgroup_subsys net_cls_subsys = {
33 .name = "net_cls",
34 .create = cgrp_create,
35 .destroy = cgrp_destroy,
36 .populate = cgrp_populate,
37#ifdef CONFIG_NET_CLS_CGROUP
38 .subsys_id = net_cls_subsys_id,
39#else
40#define net_cls_subsys_id net_cls_subsys.subsys_id
41#endif
42 .module = THIS_MODULE,
43};
44
45
27static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 46static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
28{ 47{
29 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 48 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -79,14 +98,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
79 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 98 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
80} 99}
81 100
82struct cgroup_subsys net_cls_subsys = {
83 .name = "net_cls",
84 .create = cgrp_create,
85 .destroy = cgrp_destroy,
86 .populate = cgrp_populate,
87 .subsys_id = net_cls_subsys_id,
88};
89
90struct cls_cgroup_head 101struct cls_cgroup_head
91{ 102{
92 u32 handle; 103 u32 handle;
@@ -277,12 +288,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
277 288
278static int __init init_cgroup_cls(void) 289static int __init init_cgroup_cls(void)
279{ 290{
280 return register_tcf_proto_ops(&cls_cgroup_ops); 291 int ret = register_tcf_proto_ops(&cls_cgroup_ops);
292 if (ret)
293 return ret;
294 ret = cgroup_load_subsys(&net_cls_subsys);
295 if (ret)
296 unregister_tcf_proto_ops(&cls_cgroup_ops);
297 return ret;
281} 298}
282 299
283static void __exit exit_cgroup_cls(void) 300static void __exit exit_cgroup_cls(void)
284{ 301{
285 unregister_tcf_proto_ops(&cls_cgroup_ops); 302 unregister_tcf_proto_ops(&cls_cgroup_ops);
303 cgroup_unload_subsys(&net_cls_subsys);
286} 304}
287 305
288module_init(init_cgroup_cls); 306module_init(init_cgroup_cls);
diff --git a/net/socket.c b/net/socket.c
index ae904b58d9f5..ad2e8153c618 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2134,6 +2134,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2134 break; 2134 break;
2135 ++datagrams; 2135 ++datagrams;
2136 2136
2137 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2138 if (flags & MSG_WAITFORONE)
2139 flags |= MSG_DONTWAIT;
2140
2137 if (timeout) { 2141 if (timeout) {
2138 ktime_get_ts(timeout); 2142 ktime_get_ts(timeout);
2139 *timeout = timespec_sub(end_time, *timeout); 2143 *timeout = timespec_sub(end_time, *timeout);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75ab08eac66b..e4839c07c913 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -548,8 +548,6 @@ static int xs_udp_send_request(struct rpc_task *task)
548 /* Still some bytes left; set up for a retry later. */ 548 /* Still some bytes left; set up for a retry later. */
549 status = -EAGAIN; 549 status = -EAGAIN;
550 } 550 }
551 if (!transport->sock)
552 goto out;
553 551
554 switch (status) { 552 switch (status) {
555 case -ENOTSOCK: 553 case -ENOTSOCK:
@@ -569,7 +567,7 @@ static int xs_udp_send_request(struct rpc_task *task)
569 * prompts ECONNREFUSED. */ 567 * prompts ECONNREFUSED. */
570 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 568 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
571 } 569 }
572out: 570
573 return status; 571 return status;
574} 572}
575 573
@@ -651,8 +649,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
651 status = -EAGAIN; 649 status = -EAGAIN;
652 break; 650 break;
653 } 651 }
654 if (!transport->sock)
655 goto out;
656 652
657 switch (status) { 653 switch (status) {
658 case -ENOTSOCK: 654 case -ENOTSOCK:
@@ -672,7 +668,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
672 case -ENOTCONN: 668 case -ENOTCONN:
673 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 669 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
674 } 670 }
675out: 671
676 return status; 672 return status;
677} 673}
678 674
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ed89c59bb431..81fcafc60150 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -324,7 +324,7 @@ struct reg_regdb_search_request {
324}; 324};
325 325
326static LIST_HEAD(reg_regdb_search_list); 326static LIST_HEAD(reg_regdb_search_list);
327static DEFINE_SPINLOCK(reg_regdb_search_lock); 327static DEFINE_MUTEX(reg_regdb_search_mutex);
328 328
329static void reg_regdb_search(struct work_struct *work) 329static void reg_regdb_search(struct work_struct *work)
330{ 330{
@@ -332,7 +332,7 @@ static void reg_regdb_search(struct work_struct *work)
332 const struct ieee80211_regdomain *curdom, *regdom; 332 const struct ieee80211_regdomain *curdom, *regdom;
333 int i, r; 333 int i, r;
334 334
335 spin_lock(&reg_regdb_search_lock); 335 mutex_lock(&reg_regdb_search_mutex);
336 while (!list_empty(&reg_regdb_search_list)) { 336 while (!list_empty(&reg_regdb_search_list)) {
337 request = list_first_entry(&reg_regdb_search_list, 337 request = list_first_entry(&reg_regdb_search_list,
338 struct reg_regdb_search_request, 338 struct reg_regdb_search_request,
@@ -346,18 +346,16 @@ static void reg_regdb_search(struct work_struct *work)
346 r = reg_copy_regd(&regdom, curdom); 346 r = reg_copy_regd(&regdom, curdom);
347 if (r) 347 if (r)
348 break; 348 break;
349 spin_unlock(&reg_regdb_search_lock);
350 mutex_lock(&cfg80211_mutex); 349 mutex_lock(&cfg80211_mutex);
351 set_regdom(regdom); 350 set_regdom(regdom);
352 mutex_unlock(&cfg80211_mutex); 351 mutex_unlock(&cfg80211_mutex);
353 spin_lock(&reg_regdb_search_lock);
354 break; 352 break;
355 } 353 }
356 } 354 }
357 355
358 kfree(request); 356 kfree(request);
359 } 357 }
360 spin_unlock(&reg_regdb_search_lock); 358 mutex_unlock(&reg_regdb_search_mutex);
361} 359}
362 360
363static DECLARE_WORK(reg_regdb_work, reg_regdb_search); 361static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
@@ -375,9 +373,9 @@ static void reg_regdb_query(const char *alpha2)
375 373
376 memcpy(request->alpha2, alpha2, 2); 374 memcpy(request->alpha2, alpha2, 2);
377 375
378 spin_lock(&reg_regdb_search_lock); 376 mutex_lock(&reg_regdb_search_mutex);
379 list_add_tail(&request->list, &reg_regdb_search_list); 377 list_add_tail(&request->list, &reg_regdb_search_list);
380 spin_unlock(&reg_regdb_search_lock); 378 mutex_unlock(&reg_regdb_search_mutex);
381 379
382 schedule_work(&reg_regdb_work); 380 schedule_work(&reg_regdb_work);
383} 381}
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index c79874696bec..e85789e53816 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -1631,8 +1631,6 @@ unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
1631 } 1631 }
1632 1632
1633 semitones = bend / 100; 1633 semitones = bend / 100;
1634 if (semitones > 99)
1635 semitones = 99;
1636 cents = bend % 100; 1634 cents = bend % 100;
1637 1635
1638 amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000; 1636 amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5bd7cf45f3a5..0e76ac2b2ace 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1806,6 +1806,8 @@ int snd_hda_add_nid(struct hda_codec *codec, struct snd_kcontrol *kctl,
1806 item->nid = nid; 1806 item->nid = nid;
1807 return 0; 1807 return 0;
1808 } 1808 }
1809 printk(KERN_ERR "hda-codec: no NID for mapping control %s:%d:%d\n",
1810 kctl->id.name, kctl->id.index, index);
1809 return -EINVAL; 1811 return -EINVAL;
1810} 1812}
1811EXPORT_SYMBOL_HDA(snd_hda_add_nid); 1813EXPORT_SYMBOL_HDA(snd_hda_add_nid);
@@ -2884,7 +2886,7 @@ int /*__devinit*/ snd_hda_build_controls(struct hda_bus *bus)
2884 list_for_each_entry(codec, &bus->codec_list, list) { 2886 list_for_each_entry(codec, &bus->codec_list, list) {
2885 int err = snd_hda_codec_build_controls(codec); 2887 int err = snd_hda_codec_build_controls(codec);
2886 if (err < 0) { 2888 if (err < 0) {
2887 printk(KERN_ERR "hda_codec: cannot build controls" 2889 printk(KERN_ERR "hda_codec: cannot build controls "
2888 "for #%d (error %d)\n", codec->addr, err); 2890 "for #%d (error %d)\n", codec->addr, err);
2889 err = snd_hda_codec_reset(codec); 2891 err = snd_hda_codec_reset(codec);
2890 if (err < 0) { 2892 if (err < 0) {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index da1ac9068aac..8b2915631cc3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2271,6 +2271,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2271 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2271 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2272 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), 2272 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
2273 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), 2273 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
2274 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
2274 {} 2275 {}
2275}; 2276};
2276 2277
@@ -2378,6 +2379,13 @@ static void __devinit check_msi(struct azx *chip)
2378 "hda_intel: msi for device %04x:%04x set to %d\n", 2379 "hda_intel: msi for device %04x:%04x set to %d\n",
2379 q->subvendor, q->subdevice, q->value); 2380 q->subvendor, q->subdevice, q->value);
2380 chip->msi = q->value; 2381 chip->msi = q->value;
2382 return;
2383 }
2384
2385 /* NVidia chipsets seem to cause troubles with MSI */
2386 if (chip->driver_type == AZX_DRIVER_NVIDIA) {
2387 printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
2388 chip->msi = 0;
2381 } 2389 }
2382} 2390}
2383 2391
@@ -2706,6 +2714,7 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2706 { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH }, 2714 { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH },
2707 /* PCH */ 2715 /* PCH */
2708 { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH }, 2716 { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH },
2717 { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
2709 /* CPT */ 2718 /* CPT */
2710 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, 2719 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
2711 /* SCH */ 2720 /* SCH */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3a8371990d75..4ec57633af88 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6477,7 +6477,7 @@ static struct alc_config_preset alc260_presets[] = {
6477 .num_dacs = ARRAY_SIZE(alc260_dac_nids), 6477 .num_dacs = ARRAY_SIZE(alc260_dac_nids),
6478 .dac_nids = alc260_dac_nids, 6478 .dac_nids = alc260_dac_nids,
6479 .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids), 6479 .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
6480 .adc_nids = alc260_adc_nids, 6480 .adc_nids = alc260_dual_adc_nids,
6481 .num_channel_mode = ARRAY_SIZE(alc260_modes), 6481 .num_channel_mode = ARRAY_SIZE(alc260_modes),
6482 .channel_mode = alc260_modes, 6482 .channel_mode = alc260_modes,
6483 .input_mux = &alc260_capture_source, 6483 .input_mux = &alc260_capture_source,
@@ -9195,6 +9195,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9195 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG), 9195 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG),
9196 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG), 9196 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG),
9197 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG), 9197 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG),
9198 SND_PCI_QUIRK(0x1462, 0x4570, "MSI Wind Top AE2220", ALC883_TARGA_DIG),
9198 SND_PCI_QUIRK(0x1462, 0x6510, "MSI GX620", ALC883_TARGA_8ch_DIG), 9199 SND_PCI_QUIRK(0x1462, 0x6510, "MSI GX620", ALC883_TARGA_8ch_DIG),
9199 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC883_6ST_DIG), 9200 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC883_6ST_DIG),
9200 SND_PCI_QUIRK(0x1462, 0x7187, "MSI", ALC883_6ST_DIG), 9201 SND_PCI_QUIRK(0x1462, 0x7187, "MSI", ALC883_6ST_DIG),
@@ -9204,6 +9205,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9204 SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), 9205 SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG),
9205 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), 9206 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
9206 SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG), 9207 SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
9208 SND_PCI_QUIRK(0x1462, 0x7437, "MSI NetOn AP1900", ALC883_TARGA_DIG),
9207 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), 9209 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
9208 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), 9210 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
9209 9211
@@ -9235,7 +9237,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9235 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL), 9237 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
9236 SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL), 9238 SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
9237 SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL), 9239 SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
9238 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), 9240 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
9239 9241
9240 {} 9242 {}
9241}; 9243};
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index bdd3b7ecad0a..bd498d496952 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
24DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) 24DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
25DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) 25DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
26 26
27# Make the path relative to DESTDIR, not prefix
28ifndef DESTDIR
27prefix?=$(HOME) 29prefix?=$(HOME)
30endif
28bindir?=$(prefix)/bin 31bindir?=$(prefix)/bin
29htmldir?=$(prefix)/share/doc/perf-doc 32htmldir?=$(prefix)/share/doc/perf-doc
30pdfdir?=$(prefix)/share/doc/perf-doc 33pdfdir?=$(prefix)/share/doc/perf-doc
@@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
32man1dir=$(mandir)/man1 35man1dir=$(mandir)/man1
33man5dir=$(mandir)/man5 36man5dir=$(mandir)/man5
34man7dir=$(mandir)/man7 37man7dir=$(mandir)/man7
35# DESTDIR=
36 38
37ASCIIDOC=asciidoc 39ASCIIDOC=asciidoc
38ASCIIDOC_EXTRA = --unsafe 40ASCIIDOC_EXTRA = --unsafe
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 2d537382c686..8a8f52db7e38 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -216,7 +216,10 @@ STRIP ?= strip
216# runtime figures out where they are based on the path to the executable. 216# runtime figures out where they are based on the path to the executable.
217# This can help installing the suite in a relocatable way. 217# This can help installing the suite in a relocatable way.
218 218
219# Make the path relative to DESTDIR, not to prefix
220ifndef DESTDIR
219prefix = $(HOME) 221prefix = $(HOME)
222endif
220bindir_relative = bin 223bindir_relative = bin
221bindir = $(prefix)/$(bindir_relative) 224bindir = $(prefix)/$(bindir_relative)
222mandir = share/man 225mandir = share/man
@@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc
233ETC_PERFCONFIG = etc/perfconfig 236ETC_PERFCONFIG = etc/perfconfig
234endif 237endif
235lib = lib 238lib = lib
236# DESTDIR=
237 239
238export prefix bindir sharedir sysconfdir 240export prefix bindir sharedir sysconfdir
239 241
@@ -387,6 +389,7 @@ LIB_H += util/thread.h
387LIB_H += util/trace-event.h 389LIB_H += util/trace-event.h
388LIB_H += util/probe-finder.h 390LIB_H += util/probe-finder.h
389LIB_H += util/probe-event.h 391LIB_H += util/probe-event.h
392LIB_H += util/cpumap.h
390 393
391LIB_OBJS += util/abspath.o 394LIB_OBJS += util/abspath.o
392LIB_OBJS += util/alias.o 395LIB_OBJS += util/alias.o
@@ -433,6 +436,7 @@ LIB_OBJS += util/sort.o
433LIB_OBJS += util/hist.o 436LIB_OBJS += util/hist.o
434LIB_OBJS += util/probe-event.o 437LIB_OBJS += util/probe-event.o
435LIB_OBJS += util/util.o 438LIB_OBJS += util/util.o
439LIB_OBJS += util/cpumap.o
436 440
437BUILTIN_OBJS += builtin-annotate.o 441BUILTIN_OBJS += builtin-annotate.o
438 442
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5ec5de995872..6ad7148451c5 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -116,7 +116,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
116 return 0; 116 return 0;
117 } 117 }
118 118
119 he = __perf_session__add_hist_entry(self, al, NULL, count, &hit); 119 he = __perf_session__add_hist_entry(&self->hists, al, NULL, count, &hit);
120 if (he == NULL) 120 if (he == NULL)
121 return -ENOMEM; 121 return -ENOMEM;
122 122
@@ -564,8 +564,8 @@ static int __cmd_annotate(void)
564 if (verbose > 2) 564 if (verbose > 2)
565 dsos__fprintf(stdout); 565 dsos__fprintf(stdout);
566 566
567 perf_session__collapse_resort(session); 567 perf_session__collapse_resort(&session->hists);
568 perf_session__output_resort(session, session->event_total[0]); 568 perf_session__output_resort(&session->hists, session->event_total[0]);
569 perf_session__find_annotations(session); 569 perf_session__find_annotations(session);
570out_delete: 570out_delete:
571 perf_session__delete(session); 571 perf_session__delete(session);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 18b3f505f9db..1ea15d8aeed1 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -26,7 +26,8 @@ static int perf_session__add_hist_entry(struct perf_session *self,
26 struct addr_location *al, u64 count) 26 struct addr_location *al, u64 count)
27{ 27{
28 bool hit; 28 bool hit;
29 struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, 29 struct hist_entry *he = __perf_session__add_hist_entry(&self->hists,
30 al, NULL,
30 count, &hit); 31 count, &hit);
31 if (he == NULL) 32 if (he == NULL)
32 return -ENOMEM; 33 return -ENOMEM;
@@ -114,7 +115,7 @@ static void perf_session__resort_hist_entries(struct perf_session *self)
114 115
115static void perf_session__set_hist_entries_positions(struct perf_session *self) 116static void perf_session__set_hist_entries_positions(struct perf_session *self)
116{ 117{
117 perf_session__output_resort(self, self->events_stats.total); 118 perf_session__output_resort(&self->hists, self->events_stats.total);
118 perf_session__resort_hist_entries(self); 119 perf_session__resort_hist_entries(self);
119} 120}
120 121
@@ -166,13 +167,15 @@ static int __cmd_diff(void)
166 goto out_delete; 167 goto out_delete;
167 } 168 }
168 169
169 perf_session__output_resort(session[1], session[1]->events_stats.total); 170 perf_session__output_resort(&session[1]->hists,
171 session[1]->events_stats.total);
170 if (show_displacement) 172 if (show_displacement)
171 perf_session__set_hist_entries_positions(session[0]); 173 perf_session__set_hist_entries_positions(session[0]);
172 174
173 perf_session__match_hists(session[0], session[1]); 175 perf_session__match_hists(session[0], session[1]);
174 perf_session__fprintf_hists(session[1], session[0], 176 perf_session__fprintf_hists(&session[1]->hists, session[0],
175 show_displacement, stdout); 177 show_displacement, stdout,
178 session[1]->events_stats.total);
176out_delete: 179out_delete:
177 for (i = 0; i < 2; ++i) 180 for (i = 0; i < 2; ++i)
178 perf_session__delete(session[i]); 181 perf_session__delete(session[i]);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 771533ced6a8..3b8b6387c47c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -22,6 +22,7 @@
22#include "util/debug.h" 22#include "util/debug.h"
23#include "util/session.h" 23#include "util/session.h"
24#include "util/symbol.h" 24#include "util/symbol.h"
25#include "util/cpumap.h"
25 26
26#include <unistd.h> 27#include <unistd.h>
27#include <sched.h> 28#include <sched.h>
@@ -244,6 +245,9 @@ static void create_counter(int counter, int cpu, pid_t pid)
244 245
245 attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; 246 attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
246 247
248 if (nr_counters > 1)
249 attr->sample_type |= PERF_SAMPLE_ID;
250
247 if (freq) { 251 if (freq) {
248 attr->sample_type |= PERF_SAMPLE_PERIOD; 252 attr->sample_type |= PERF_SAMPLE_PERIOD;
249 attr->freq = 1; 253 attr->freq = 1;
@@ -391,6 +395,9 @@ static int process_buildids(void)
391{ 395{
392 u64 size = lseek(output, 0, SEEK_CUR); 396 u64 size = lseek(output, 0, SEEK_CUR);
393 397
398 if (size == 0)
399 return 0;
400
394 session->fd = output; 401 session->fd = output;
395 return __perf_session__process_events(session, post_processing_offset, 402 return __perf_session__process_events(session, post_processing_offset,
396 size - post_processing_offset, 403 size - post_processing_offset,
@@ -418,9 +425,6 @@ static int __cmd_record(int argc, const char **argv)
418 char buf; 425 char buf;
419 426
420 page_size = sysconf(_SC_PAGE_SIZE); 427 page_size = sysconf(_SC_PAGE_SIZE);
421 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
422 assert(nr_cpus <= MAX_NR_CPUS);
423 assert(nr_cpus >= 0);
424 428
425 atexit(sig_atexit); 429 atexit(sig_atexit);
426 signal(SIGCHLD, sig_handler); 430 signal(SIGCHLD, sig_handler);
@@ -544,8 +548,9 @@ static int __cmd_record(int argc, const char **argv)
544 if ((!system_wide && !inherit) || profile_cpu != -1) { 548 if ((!system_wide && !inherit) || profile_cpu != -1) {
545 open_counters(profile_cpu, target_pid); 549 open_counters(profile_cpu, target_pid);
546 } else { 550 } else {
551 nr_cpus = read_cpu_map();
547 for (i = 0; i < nr_cpus; i++) 552 for (i = 0; i < nr_cpus; i++)
548 open_counters(i, target_pid); 553 open_counters(cpumap[i], target_pid);
549 } 554 }
550 555
551 if (file_new) { 556 if (file_new) {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cfc655d40bb7..f815de25d0fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -45,28 +45,71 @@ static char *pretty_printing_style = default_pretty_printing_style;
45 45
46static char callchain_default_opt[] = "fractal,0.5"; 46static char callchain_default_opt[] = "fractal,0.5";
47 47
48static struct event_stat_id *get_stats(struct perf_session *self,
49 u64 event_stream, u32 type, u64 config)
50{
51 struct rb_node **p = &self->stats_by_id.rb_node;
52 struct rb_node *parent = NULL;
53 struct event_stat_id *iter, *new;
54
55 while (*p != NULL) {
56 parent = *p;
57 iter = rb_entry(parent, struct event_stat_id, rb_node);
58 if (iter->config == config)
59 return iter;
60
61
62 if (config > iter->config)
63 p = &(*p)->rb_right;
64 else
65 p = &(*p)->rb_left;
66 }
67
68 new = malloc(sizeof(struct event_stat_id));
69 if (new == NULL)
70 return NULL;
71 memset(new, 0, sizeof(struct event_stat_id));
72 new->event_stream = event_stream;
73 new->config = config;
74 new->type = type;
75 rb_link_node(&new->rb_node, parent, p);
76 rb_insert_color(&new->rb_node, &self->stats_by_id);
77 return new;
78}
79
48static int perf_session__add_hist_entry(struct perf_session *self, 80static int perf_session__add_hist_entry(struct perf_session *self,
49 struct addr_location *al, 81 struct addr_location *al,
50 struct ip_callchain *chain, u64 count) 82 struct sample_data *data)
51{ 83{
52 struct symbol **syms = NULL, *parent = NULL; 84 struct symbol **syms = NULL, *parent = NULL;
53 bool hit; 85 bool hit;
54 struct hist_entry *he; 86 struct hist_entry *he;
87 struct event_stat_id *stats;
88 struct perf_event_attr *attr;
55 89
56 if ((sort__has_parent || symbol_conf.use_callchain) && chain) 90 if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain)
57 syms = perf_session__resolve_callchain(self, al->thread, 91 syms = perf_session__resolve_callchain(self, al->thread,
58 chain, &parent); 92 data->callchain, &parent);
59 he = __perf_session__add_hist_entry(self, al, parent, count, &hit); 93
94 attr = perf_header__find_attr(data->id, &self->header);
95 if (attr)
96 stats = get_stats(self, data->id, attr->type, attr->config);
97 else
98 stats = get_stats(self, data->id, 0, 0);
99 if (stats == NULL)
100 return -ENOMEM;
101 he = __perf_session__add_hist_entry(&stats->hists, al, parent,
102 data->period, &hit);
60 if (he == NULL) 103 if (he == NULL)
61 return -ENOMEM; 104 return -ENOMEM;
62 105
63 if (hit) 106 if (hit)
64 he->count += count; 107 he->count += data->period;
65 108
66 if (symbol_conf.use_callchain) { 109 if (symbol_conf.use_callchain) {
67 if (!hit) 110 if (!hit)
68 callchain_init(&he->callchain); 111 callchain_init(&he->callchain);
69 append_chain(&he->callchain, chain, syms); 112 append_chain(&he->callchain, data->callchain, syms);
70 free(syms); 113 free(syms);
71 } 114 }
72 115
@@ -86,10 +129,30 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
86 return 0; 129 return 0;
87} 130}
88 131
132static int add_event_total(struct perf_session *session,
133 struct sample_data *data,
134 struct perf_event_attr *attr)
135{
136 struct event_stat_id *stats;
137
138 if (attr)
139 stats = get_stats(session, data->id, attr->type, attr->config);
140 else
141 stats = get_stats(session, data->id, 0, 0);
142
143 if (!stats)
144 return -ENOMEM;
145
146 stats->stats.total += data->period;
147 session->events_stats.total += data->period;
148 return 0;
149}
150
89static int process_sample_event(event_t *event, struct perf_session *session) 151static int process_sample_event(event_t *event, struct perf_session *session)
90{ 152{
91 struct sample_data data = { .period = 1, }; 153 struct sample_data data = { .period = 1, };
92 struct addr_location al; 154 struct addr_location al;
155 struct perf_event_attr *attr;
93 156
94 event__parse_sample(event, session->sample_type, &data); 157 event__parse_sample(event, session->sample_type, &data);
95 158
@@ -123,12 +186,18 @@ static int process_sample_event(event_t *event, struct perf_session *session)
123 if (al.filtered || (hide_unresolved && al.sym == NULL)) 186 if (al.filtered || (hide_unresolved && al.sym == NULL))
124 return 0; 187 return 0;
125 188
126 if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { 189 if (perf_session__add_hist_entry(session, &al, &data)) {
127 pr_debug("problem incrementing symbol count, skipping event\n"); 190 pr_debug("problem incrementing symbol count, skipping event\n");
128 return -1; 191 return -1;
129 } 192 }
130 193
131 session->events_stats.total += data.period; 194 attr = perf_header__find_attr(data.id, &session->header);
195
196 if (add_event_total(session, &data, attr)) {
197 pr_debug("problem adding event count\n");
198 return -1;
199 }
200
132 return 0; 201 return 0;
133} 202}
134 203
@@ -197,6 +266,7 @@ static int __cmd_report(void)
197{ 266{
198 int ret = -EINVAL; 267 int ret = -EINVAL;
199 struct perf_session *session; 268 struct perf_session *session;
269 struct rb_node *next;
200 270
201 session = perf_session__new(input_name, O_RDONLY, force); 271 session = perf_session__new(input_name, O_RDONLY, force);
202 if (session == NULL) 272 if (session == NULL)
@@ -224,10 +294,28 @@ static int __cmd_report(void)
224 if (verbose > 2) 294 if (verbose > 2)
225 dsos__fprintf(stdout); 295 dsos__fprintf(stdout);
226 296
227 perf_session__collapse_resort(session); 297 next = rb_first(&session->stats_by_id);
228 perf_session__output_resort(session, session->events_stats.total); 298 while (next) {
229 fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); 299 struct event_stat_id *stats;
230 perf_session__fprintf_hists(session, NULL, false, stdout); 300
301 stats = rb_entry(next, struct event_stat_id, rb_node);
302 perf_session__collapse_resort(&stats->hists);
303 perf_session__output_resort(&stats->hists, stats->stats.total);
304 if (rb_first(&session->stats_by_id) ==
305 rb_last(&session->stats_by_id))
306 fprintf(stdout, "# Samples: %Ld\n#\n",
307 stats->stats.total);
308 else
309 fprintf(stdout, "# Samples: %Ld %s\n#\n",
310 stats->stats.total,
311 __event_name(stats->type, stats->config));
312
313 perf_session__fprintf_hists(&stats->hists, NULL, false, stdout,
314 stats->stats.total);
315 fprintf(stdout, "\n\n");
316 next = rb_next(&stats->rb_node);
317 }
318
231 if (sort_order == default_sort_order && 319 if (sort_order == default_sort_order &&
232 parent_pattern == default_parent_pattern) 320 parent_pattern == default_parent_pattern)
233 fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); 321 fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n");
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e8c85d5aec41..95db31cff6fd 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -45,6 +45,7 @@
45#include "util/event.h" 45#include "util/event.h"
46#include "util/debug.h" 46#include "util/debug.h"
47#include "util/header.h" 47#include "util/header.h"
48#include "util/cpumap.h"
48 49
49#include <sys/prctl.h> 50#include <sys/prctl.h>
50#include <math.h> 51#include <math.h>
@@ -151,7 +152,7 @@ static void create_perf_stat_counter(int counter, int pid)
151 unsigned int cpu; 152 unsigned int cpu;
152 153
153 for (cpu = 0; cpu < nr_cpus; cpu++) { 154 for (cpu = 0; cpu < nr_cpus; cpu++) {
154 fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); 155 fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0);
155 if (fd[cpu][counter] < 0 && verbose) 156 if (fd[cpu][counter] < 0 && verbose)
156 fprintf(stderr, ERR_PERF_OPEN, counter, 157 fprintf(stderr, ERR_PERF_OPEN, counter,
157 fd[cpu][counter], strerror(errno)); 158 fd[cpu][counter], strerror(errno));
@@ -519,9 +520,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
519 nr_counters = ARRAY_SIZE(default_attrs); 520 nr_counters = ARRAY_SIZE(default_attrs);
520 } 521 }
521 522
522 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); 523 if (system_wide)
523 assert(nr_cpus <= MAX_NR_CPUS); 524 nr_cpus = read_cpu_map();
524 assert((int)nr_cpus >= 0); 525 else
526 nr_cpus = 1;
525 527
526 /* 528 /*
527 * We dont want to block the signals - that would cause 529 * We dont want to block the signals - that would cause
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 31f2e597800c..0b719e3dde05 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -28,6 +28,7 @@
28#include <linux/rbtree.h> 28#include <linux/rbtree.h>
29#include "util/parse-options.h" 29#include "util/parse-options.h"
30#include "util/parse-events.h" 30#include "util/parse-events.h"
31#include "util/cpumap.h"
31 32
32#include "util/debug.h" 33#include "util/debug.h"
33 34
@@ -1123,7 +1124,7 @@ static void start_counter(int i, int counter)
1123 1124
1124 cpu = profile_cpu; 1125 cpu = profile_cpu;
1125 if (target_pid == -1 && profile_cpu == -1) 1126 if (target_pid == -1 && profile_cpu == -1)
1126 cpu = i; 1127 cpu = cpumap[i];
1127 1128
1128 attr = attrs + counter; 1129 attr = attrs + counter;
1129 1130
@@ -1347,12 +1348,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1347 attrs[counter].sample_period = default_interval; 1348 attrs[counter].sample_period = default_interval;
1348 } 1349 }
1349 1350
1350 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
1351 assert(nr_cpus <= MAX_NR_CPUS);
1352 assert(nr_cpus >= 0);
1353
1354 if (target_pid != -1 || profile_cpu != -1) 1351 if (target_pid != -1 || profile_cpu != -1)
1355 nr_cpus = 1; 1352 nr_cpus = 1;
1353 else
1354 nr_cpus = read_cpu_map();
1356 1355
1357 get_term_dimensions(&winsize); 1356 get_term_dimensions(&winsize);
1358 if (print_entries == 0) { 1357 if (print_entries == 0) {
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
new file mode 100644
index 000000000000..4e01490e51e5
--- /dev/null
+++ b/tools/perf/util/cpumap.c
@@ -0,0 +1,59 @@
1#include "util.h"
2#include "../perf.h"
3#include "cpumap.h"
4#include <assert.h>
5#include <stdio.h>
6
7int cpumap[MAX_NR_CPUS];
8
9static int default_cpu_map(void)
10{
11 int nr_cpus, i;
12
13 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
14 assert(nr_cpus <= MAX_NR_CPUS);
15 assert((int)nr_cpus >= 0);
16
17 for (i = 0; i < nr_cpus; ++i)
18 cpumap[i] = i;
19
20 return nr_cpus;
21}
22
23int read_cpu_map(void)
24{
25 FILE *onlnf;
26 int nr_cpus = 0;
27 int n, cpu, prev;
28 char sep;
29
30 onlnf = fopen("/sys/devices/system/cpu/online", "r");
31 if (!onlnf)
32 return default_cpu_map();
33
34 sep = 0;
35 prev = -1;
36 for (;;) {
37 n = fscanf(onlnf, "%u%c", &cpu, &sep);
38 if (n <= 0)
39 break;
40 if (prev >= 0) {
41 assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS);
42 while (++prev < cpu)
43 cpumap[nr_cpus++] = prev;
44 }
45 assert (nr_cpus < MAX_NR_CPUS);
46 cpumap[nr_cpus++] = cpu;
47 if (n == 2 && sep == '-')
48 prev = cpu;
49 else
50 prev = -1;
51 if (n == 1 || sep == '\n')
52 break;
53 }
54 fclose(onlnf);
55 if (nr_cpus > 0)
56 return nr_cpus;
57
58 return default_cpu_map();
59}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
new file mode 100644
index 000000000000..86c78bb33098
--- /dev/null
+++ b/tools/perf/util/cpumap.h
@@ -0,0 +1,7 @@
1#ifndef __PERF_CPUMAP_H
2#define __PERF_CPUMAP_H
3
4extern int read_cpu_map(void);
5extern int cpumap[];
6
7#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 50a7132887f5..a33b94952e34 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -99,6 +99,15 @@ struct events_stats {
99 u64 lost; 99 u64 lost;
100}; 100};
101 101
102struct event_stat_id {
103 struct rb_node rb_node;
104 struct rb_root hists;
105 struct events_stats stats;
106 u64 config;
107 u64 event_stream;
108 u32 type;
109};
110
102void event__print_totals(void); 111void event__print_totals(void);
103 112
104struct perf_session; 113struct perf_session;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 44408c2621cf..2be33c7dbf03 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -12,12 +12,12 @@ struct callchain_param callchain_param = {
12 * histogram, sorted on item, collects counts 12 * histogram, sorted on item, collects counts
13 */ 13 */
14 14
15struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 15struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
16 struct addr_location *al, 16 struct addr_location *al,
17 struct symbol *sym_parent, 17 struct symbol *sym_parent,
18 u64 count, bool *hit) 18 u64 count, bool *hit)
19{ 19{
20 struct rb_node **p = &self->hists.rb_node; 20 struct rb_node **p = &hists->rb_node;
21 struct rb_node *parent = NULL; 21 struct rb_node *parent = NULL;
22 struct hist_entry *he; 22 struct hist_entry *he;
23 struct hist_entry entry = { 23 struct hist_entry entry = {
@@ -53,7 +53,7 @@ struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self,
53 return NULL; 53 return NULL;
54 *he = entry; 54 *he = entry;
55 rb_link_node(&he->rb_node, parent, p); 55 rb_link_node(&he->rb_node, parent, p);
56 rb_insert_color(&he->rb_node, &self->hists); 56 rb_insert_color(&he->rb_node, hists);
57 *hit = false; 57 *hit = false;
58 return he; 58 return he;
59} 59}
@@ -130,7 +130,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
130 rb_insert_color(&he->rb_node, root); 130 rb_insert_color(&he->rb_node, root);
131} 131}
132 132
133void perf_session__collapse_resort(struct perf_session *self) 133void perf_session__collapse_resort(struct rb_root *hists)
134{ 134{
135 struct rb_root tmp; 135 struct rb_root tmp;
136 struct rb_node *next; 136 struct rb_node *next;
@@ -140,17 +140,17 @@ void perf_session__collapse_resort(struct perf_session *self)
140 return; 140 return;
141 141
142 tmp = RB_ROOT; 142 tmp = RB_ROOT;
143 next = rb_first(&self->hists); 143 next = rb_first(hists);
144 144
145 while (next) { 145 while (next) {
146 n = rb_entry(next, struct hist_entry, rb_node); 146 n = rb_entry(next, struct hist_entry, rb_node);
147 next = rb_next(&n->rb_node); 147 next = rb_next(&n->rb_node);
148 148
149 rb_erase(&n->rb_node, &self->hists); 149 rb_erase(&n->rb_node, hists);
150 collapse__insert_entry(&tmp, n); 150 collapse__insert_entry(&tmp, n);
151 } 151 }
152 152
153 self->hists = tmp; 153 *hists = tmp;
154} 154}
155 155
156/* 156/*
@@ -183,7 +183,7 @@ static void perf_session__insert_output_hist_entry(struct rb_root *root,
183 rb_insert_color(&he->rb_node, root); 183 rb_insert_color(&he->rb_node, root);
184} 184}
185 185
186void perf_session__output_resort(struct perf_session *self, u64 total_samples) 186void perf_session__output_resort(struct rb_root *hists, u64 total_samples)
187{ 187{
188 struct rb_root tmp; 188 struct rb_root tmp;
189 struct rb_node *next; 189 struct rb_node *next;
@@ -194,18 +194,18 @@ void perf_session__output_resort(struct perf_session *self, u64 total_samples)
194 total_samples * (callchain_param.min_percent / 100); 194 total_samples * (callchain_param.min_percent / 100);
195 195
196 tmp = RB_ROOT; 196 tmp = RB_ROOT;
197 next = rb_first(&self->hists); 197 next = rb_first(hists);
198 198
199 while (next) { 199 while (next) {
200 n = rb_entry(next, struct hist_entry, rb_node); 200 n = rb_entry(next, struct hist_entry, rb_node);
201 next = rb_next(&n->rb_node); 201 next = rb_next(&n->rb_node);
202 202
203 rb_erase(&n->rb_node, &self->hists); 203 rb_erase(&n->rb_node, hists);
204 perf_session__insert_output_hist_entry(&tmp, n, 204 perf_session__insert_output_hist_entry(&tmp, n,
205 min_callchain_hits); 205 min_callchain_hits);
206 } 206 }
207 207
208 self->hists = tmp; 208 *hists = tmp;
209} 209}
210 210
211static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 211static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -456,10 +456,10 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
456} 456}
457 457
458static size_t hist_entry__fprintf(struct hist_entry *self, 458static size_t hist_entry__fprintf(struct hist_entry *self,
459 struct perf_session *session,
460 struct perf_session *pair_session, 459 struct perf_session *pair_session,
461 bool show_displacement, 460 bool show_displacement,
462 long displacement, FILE *fp) 461 long displacement, FILE *fp,
462 u64 session_total)
463{ 463{
464 struct sort_entry *se; 464 struct sort_entry *se;
465 u64 count, total; 465 u64 count, total;
@@ -474,7 +474,7 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
474 total = pair_session->events_stats.total; 474 total = pair_session->events_stats.total;
475 } else { 475 } else {
476 count = self->count; 476 count = self->count;
477 total = session->events_stats.total; 477 total = session_total;
478 } 478 }
479 479
480 if (total) 480 if (total)
@@ -496,8 +496,8 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
496 496
497 if (total > 0) 497 if (total > 0)
498 old_percent = (count * 100.0) / total; 498 old_percent = (count * 100.0) / total;
499 if (session->events_stats.total > 0) 499 if (session_total > 0)
500 new_percent = (self->count * 100.0) / session->events_stats.total; 500 new_percent = (self->count * 100.0) / session_total;
501 501
502 diff = new_percent - old_percent; 502 diff = new_percent - old_percent;
503 503
@@ -544,16 +544,17 @@ static size_t hist_entry__fprintf(struct hist_entry *self,
544 left_margin -= thread__comm_len(self->thread); 544 left_margin -= thread__comm_len(self->thread);
545 } 545 }
546 546
547 hist_entry_callchain__fprintf(fp, self, session->events_stats.total, 547 hist_entry_callchain__fprintf(fp, self, session_total,
548 left_margin); 548 left_margin);
549 } 549 }
550 550
551 return ret; 551 return ret;
552} 552}
553 553
554size_t perf_session__fprintf_hists(struct perf_session *self, 554size_t perf_session__fprintf_hists(struct rb_root *hists,
555 struct perf_session *pair, 555 struct perf_session *pair,
556 bool show_displacement, FILE *fp) 556 bool show_displacement, FILE *fp,
557 u64 session_total)
557{ 558{
558 struct sort_entry *se; 559 struct sort_entry *se;
559 struct rb_node *nd; 560 struct rb_node *nd;
@@ -641,7 +642,7 @@ size_t perf_session__fprintf_hists(struct perf_session *self,
641 fprintf(fp, "\n#\n"); 642 fprintf(fp, "\n#\n");
642 643
643print_entries: 644print_entries:
644 for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { 645 for (nd = rb_first(hists); nd; nd = rb_next(nd)) {
645 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 646 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
646 647
647 if (show_displacement) { 648 if (show_displacement) {
@@ -652,8 +653,13 @@ print_entries:
652 displacement = 0; 653 displacement = 0;
653 ++position; 654 ++position;
654 } 655 }
655 ret += hist_entry__fprintf(h, self, pair, show_displacement, 656 ret += hist_entry__fprintf(h, pair, show_displacement,
656 displacement, fp); 657 displacement, fp, session_total);
658 if (h->map == NULL && verbose > 1) {
659 __map_groups__fprintf_maps(&h->thread->mg,
660 MAP__FUNCTION, fp);
661 fprintf(fp, "%.10s end\n", graph_dotted_line);
662 }
657 } 663 }
658 664
659 free(rem_sq_bracket); 665 free(rem_sq_bracket);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index e5f99b24048b..16f360cce5bf 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -10,8 +10,9 @@ struct perf_session;
10struct hist_entry; 10struct hist_entry;
11struct addr_location; 11struct addr_location;
12struct symbol; 12struct symbol;
13struct rb_root;
13 14
14struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, 15struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists,
15 struct addr_location *al, 16 struct addr_location *al,
16 struct symbol *parent, 17 struct symbol *parent,
17 u64 count, bool *hit); 18 u64 count, bool *hit);
@@ -19,9 +20,10 @@ extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
19extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); 20extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
20void hist_entry__free(struct hist_entry *); 21void hist_entry__free(struct hist_entry *);
21 22
22void perf_session__output_resort(struct perf_session *self, u64 total_samples); 23void perf_session__output_resort(struct rb_root *hists, u64 total_samples);
23void perf_session__collapse_resort(struct perf_session *self); 24void perf_session__collapse_resort(struct rb_root *hists);
24size_t perf_session__fprintf_hists(struct perf_session *self, 25size_t perf_session__fprintf_hists(struct rb_root *hists,
25 struct perf_session *pair, 26 struct perf_session *pair,
26 bool show_displacement, FILE *fp); 27 bool show_displacement, FILE *fp,
28 u64 session_total);
27#endif /* __PERF_HIST_H */ 29#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index e77dc886760e..1e6c65ebbd80 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -169,7 +169,7 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
169{ 169{
170 Dwarf_Files *files; 170 Dwarf_Files *files;
171 size_t nfiles, i; 171 size_t nfiles, i;
172 const char *src; 172 const char *src = NULL;
173 int ret; 173 int ret;
174 174
175 if (!fname) 175 if (!fname)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0de7258e70a5..eed1cb889008 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -70,6 +70,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
70 70
71 memcpy(self->filename, filename, len); 71 memcpy(self->filename, filename, len);
72 self->threads = RB_ROOT; 72 self->threads = RB_ROOT;
73 self->stats_by_id = RB_ROOT;
73 self->last_match = NULL; 74 self->last_match = NULL;
74 self->mmap_window = 32; 75 self->mmap_window = 32;
75 self->cwd = NULL; 76 self->cwd = NULL;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 31950fcd8a4d..5c33417eebb3 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -20,6 +20,7 @@ struct perf_session {
20 struct thread *last_match; 20 struct thread *last_match;
21 struct map *vmlinux_maps[MAP__NR_TYPES]; 21 struct map *vmlinux_maps[MAP__NR_TYPES];
22 struct events_stats events_stats; 22 struct events_stats events_stats;
23 struct rb_root stats_by_id;
23 unsigned long event_total[PERF_RECORD_MAX]; 24 unsigned long event_total[PERF_RECORD_MAX];
24 unsigned long unknown_events; 25 unsigned long unknown_events;
25 struct rb_root hists; 26 struct rb_root hists;
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 21b92162282b..fa968312ee7d 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -79,8 +79,8 @@ int thread__comm_len(struct thread *self)
79 return self->comm_len; 79 return self->comm_len;
80} 80}
81 81
82static size_t __map_groups__fprintf_maps(struct map_groups *self, 82size_t __map_groups__fprintf_maps(struct map_groups *self,
83 enum map_type type, FILE *fp) 83 enum map_type type, FILE *fp)
84{ 84{
85 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); 85 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
86 struct rb_node *nd; 86 struct rb_node *nd;
@@ -89,7 +89,7 @@ static size_t __map_groups__fprintf_maps(struct map_groups *self,
89 struct map *pos = rb_entry(nd, struct map, rb_node); 89 struct map *pos = rb_entry(nd, struct map, rb_node);
90 printed += fprintf(fp, "Map:"); 90 printed += fprintf(fp, "Map:");
91 printed += map__fprintf(pos, fp); 91 printed += map__fprintf(pos, fp);
92 if (verbose > 1) { 92 if (verbose > 2) {
93 printed += dso__fprintf(pos->dso, type, fp); 93 printed += dso__fprintf(pos->dso, type, fp);
94 printed += fprintf(fp, "--\n"); 94 printed += fprintf(fp, "--\n");
95 } 95 }
@@ -183,8 +183,8 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
183 return th; 183 return th;
184} 184}
185 185
186static void map_groups__remove_overlappings(struct map_groups *self, 186static int map_groups__fixup_overlappings(struct map_groups *self,
187 struct map *map) 187 struct map *map)
188{ 188{
189 struct rb_root *root = &self->maps[map->type]; 189 struct rb_root *root = &self->maps[map->type];
190 struct rb_node *next = rb_first(root); 190 struct rb_node *next = rb_first(root);
@@ -209,7 +209,36 @@ static void map_groups__remove_overlappings(struct map_groups *self,
209 * list. 209 * list.
210 */ 210 */
211 list_add_tail(&pos->node, &self->removed_maps[map->type]); 211 list_add_tail(&pos->node, &self->removed_maps[map->type]);
212 /*
213 * Now check if we need to create new maps for areas not
214 * overlapped by the new map:
215 */
216 if (map->start > pos->start) {
217 struct map *before = map__clone(pos);
218
219 if (before == NULL)
220 return -ENOMEM;
221
222 before->end = map->start - 1;
223 map_groups__insert(self, before);
224 if (verbose >= 2)
225 map__fprintf(before, stderr);
226 }
227
228 if (map->end < pos->end) {
229 struct map *after = map__clone(pos);
230
231 if (after == NULL)
232 return -ENOMEM;
233
234 after->start = map->end + 1;
235 map_groups__insert(self, after);
236 if (verbose >= 2)
237 map__fprintf(after, stderr);
238 }
212 } 239 }
240
241 return 0;
213} 242}
214 243
215void maps__insert(struct rb_root *maps, struct map *map) 244void maps__insert(struct rb_root *maps, struct map *map)
@@ -254,7 +283,7 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
254 283
255void thread__insert_map(struct thread *self, struct map *map) 284void thread__insert_map(struct thread *self, struct map *map)
256{ 285{
257 map_groups__remove_overlappings(&self->mg, map); 286 map_groups__fixup_overlappings(&self->mg, map);
258 map_groups__insert(&self->mg, map); 287 map_groups__insert(&self->mg, map);
259} 288}
260 289
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 0a28f39de545..dcf70303e58e 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -10,6 +10,9 @@ struct map_groups {
10 struct list_head removed_maps[MAP__NR_TYPES]; 10 struct list_head removed_maps[MAP__NR_TYPES];
11}; 11};
12 12
13size_t __map_groups__fprintf_maps(struct map_groups *self,
14 enum map_type type, FILE *fp);
15
13struct thread { 16struct thread {
14 struct rb_node rb_node; 17 struct rb_node rb_node;
15 struct map_groups mg; 18 struct map_groups mg;